repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
arter97-temasek-i9300/android_kernel_samsung_smdk4412 | arch/mips/jz4740/setup.c | 7575 | 1529 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* Copyright (C) 2011, Maarten ter Huurne <maarten@treewalker.org>
* JZ4740 setup code
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <asm/mach-jz4740/base.h>
#include "reset.h"
#define JZ4740_EMC_SDRAM_CTRL 0x80
/*
 * Derive the installed SDRAM size from the EMC controller's SDRAM_CTRL
 * register (preconfigured by the boot loader) and register it as the
 * system RAM region starting at physical address 0.
 */
static void __init jz4740_detect_mem(void)
{
	void __iomem *jz_emc_base;
	u32 ctrl, bus, bank, rows, cols;
	phys_t size;

	/* NOTE(review): ioremap() result is not checked — presumably safe this
	 * early in boot, but confirm. */
	jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100);
	ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL);
	/* Decode bus width (bytes), bank bits, column bits and row bits. */
	bus = 2 - ((ctrl >> 31) & 1);
	bank = 1 + ((ctrl >> 19) & 1);
	cols = 8 + ((ctrl >> 26) & 7);
	rows = 11 + ((ctrl >> 20) & 3);
	printk(KERN_DEBUG
		"SDRAM preconfigured: bus:%u bank:%u rows:%u cols:%u\n",
		bus, bank, rows, cols);
	iounmap(jz_emc_base);

	/* Total size in bytes is 2^(bus + bank + cols + rows). */
	size = 1 << (bus + bank + cols + rows);
	add_memory_region(0, size, BOOT_MEM_RAM);
}
/*
 * Platform memory setup entry point called by the MIPS core boot code:
 * hook up the reset handlers, then probe and register RAM.
 */
void __init plat_mem_setup(void)
{
	jz4740_reset_init();
	jz4740_detect_mem();
}
/* Human-readable SoC/board name, reported e.g. via /proc/cpuinfo. */
const char *get_system_type(void)
{
	static const char system_type[] = "JZ4740";

	return system_type;
}
| gpl-2.0 |
shane87/android_kernel_lge_g3 | fs/utimes.c | 7575 | 5791 | #include <linux/compiler.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/utime.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifdef __ARCH_WANT_SYS_UTIME
/*
* sys_utime() can be implemented in user-level using sys_utimes().
* Is this for backwards compatibility? If so, why not move it
* into the appropriate arch directory (for those architectures that
* need it).
*/
/* If times==NULL, set access and modification to current time,
* must be owner or have write permission.
* Else, update from *times, must be owner or super user.
*/
/*
 * utime(2): set access and modification times from a struct utimbuf.
 * utimbuf only carries second resolution, so the nanosecond fields are
 * zeroed before handing off to do_utimes().
 */
SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
{
	struct timespec tv[2];

	if (times) {
		/* Copy the two time_t values from userspace. */
		if (get_user(tv[0].tv_sec, &times->actime) ||
		    get_user(tv[1].tv_sec, &times->modtime))
			return -EFAULT;
		tv[0].tv_nsec = 0;
		tv[1].tv_nsec = 0;
	}
	return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0);
}
#endif
/*
 * A tv_nsec value is acceptable when it is one of the special markers
 * (UTIME_OMIT / UTIME_NOW) or lies within the normal [0, 999999999] range.
 */
static bool nsec_valid(long nsec)
{
	return nsec == UTIME_OMIT || nsec == UTIME_NOW ||
	       (nsec >= 0 && nsec <= 999999999);
}
/*
 * Apply new atime/mtime to the inode behind @path.
 *
 * @times is either NULL (meaning "set both to now") or an array of two
 * timespecs ([0] = atime, [1] = mtime) whose tv_nsec may carry the
 * UTIME_NOW / UTIME_OMIT markers.  Returns 0 or a negative errno.
 */
static int utimes_common(struct path *path, struct timespec *times)
{
	int error;
	struct iattr newattrs;
	struct inode *inode = path->dentry->d_inode;

	error = mnt_want_write(path->mnt);
	if (error)
		goto out;

	/* Both "now" is equivalent to the NULL case — normalize. */
	if (times && times[0].tv_nsec == UTIME_NOW &&
	    times[1].tv_nsec == UTIME_NOW)
		times = NULL;

	newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
	if (times) {
		if (times[0].tv_nsec == UTIME_OMIT)
			newattrs.ia_valid &= ~ATTR_ATIME;
		else if (times[0].tv_nsec != UTIME_NOW) {
			newattrs.ia_atime.tv_sec = times[0].tv_sec;
			newattrs.ia_atime.tv_nsec = times[0].tv_nsec;
			newattrs.ia_valid |= ATTR_ATIME_SET;
		}
		if (times[1].tv_nsec == UTIME_OMIT)
			newattrs.ia_valid &= ~ATTR_MTIME;
		else if (times[1].tv_nsec != UTIME_NOW) {
			newattrs.ia_mtime.tv_sec = times[1].tv_sec;
			newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
			newattrs.ia_valid |= ATTR_MTIME_SET;
		}
		/*
		 * Tell inode_change_ok(), that this is an explicit time
		 * update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET
		 * were used.
		 */
		newattrs.ia_valid |= ATTR_TIMES_SET;
	} else {
		/*
		 * If times is NULL (or both times are UTIME_NOW),
		 * then we need to check permissions, because
		 * inode_change_ok() won't do it.
		 */
		error = -EACCES;
		if (IS_IMMUTABLE(inode))
			goto mnt_drop_write_and_out;
		if (!inode_owner_or_capable(inode)) {
			error = inode_permission(inode, MAY_WRITE);
			if (error)
				goto mnt_drop_write_and_out;
		}
	}
	/* notify_change() requires i_mutex held. */
	mutex_lock(&inode->i_mutex);
	error = notify_change(path->dentry, &newattrs);
	mutex_unlock(&inode->i_mutex);

mnt_drop_write_and_out:
	mnt_drop_write(path->mnt);
out:
	return error;
}
/*
* do_utimes - change times on filename or file descriptor
* @dfd: open file descriptor, -1 or AT_FDCWD
* @filename: path name or NULL
* @times: new times or NULL
* @flags: zero or more flags (only AT_SYMLINK_NOFOLLOW for the moment)
*
* If filename is NULL and dfd refers to an open file, then operate on
* the file. Otherwise look up filename, possibly using dfd as a
* starting point.
*
* If times==NULL, set access and modification to current time,
* must be owner or have write permission.
* Else, update from *times, must be owner or super user.
*/
long do_utimes(int dfd, const char __user *filename, struct timespec *times,
	       int flags)
{
	int error = -EINVAL;

	/* Reject out-of-range nanosecond values up front. */
	if (times && (!nsec_valid(times[0].tv_nsec) ||
		      !nsec_valid(times[1].tv_nsec))) {
		goto out;
	}

	if (flags & ~AT_SYMLINK_NOFOLLOW)
		goto out;

	if (filename == NULL && dfd != AT_FDCWD) {
		/* futimens()-style: operate directly on the open fd. */
		struct file *file;

		if (flags & AT_SYMLINK_NOFOLLOW)
			goto out;

		file = fget(dfd);
		error = -EBADF;
		if (!file)
			goto out;

		error = utimes_common(&file->f_path, times);
		fput(file);
	} else {
		/* Path-based: resolve filename relative to dfd. */
		struct path path;
		int lookup_flags = 0;

		if (!(flags & AT_SYMLINK_NOFOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;

		error = user_path_at(dfd, filename, lookup_flags, &path);
		if (error)
			goto out;

		error = utimes_common(&path, times);
		path_put(&path);
	}

out:
	return error;
}
/*
 * utimensat(2): nanosecond-resolution time update relative to @dfd.
 */
SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
		struct timespec __user *, utimes, int, flags)
{
	struct timespec tstimes[2];

	if (utimes) {
		if (copy_from_user(&tstimes, utimes, sizeof(tstimes)))
			return -EFAULT;

		/* Nothing to do, we must not even check the path. */
		if (tstimes[0].tv_nsec == UTIME_OMIT &&
		    tstimes[1].tv_nsec == UTIME_OMIT)
			return 0;
	}

	return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags);
}
/*
 * futimesat(2): microsecond-resolution legacy interface.  Validates the
 * timeval pair and converts it to timespecs for do_utimes().
 */
SYSCALL_DEFINE3(futimesat, int, dfd, const char __user *, filename,
		struct timeval __user *, utimes)
{
	struct timeval times[2];
	struct timespec tstimes[2];

	if (utimes) {
		if (copy_from_user(&times, utimes, sizeof(times)))
			return -EFAULT;

		/* This test is needed to catch all invalid values. If we
		   would test only in do_utimes we would miss those invalid
		   values truncated by the multiplication with 1000. Note
		   that we also catch UTIME_{NOW,OMIT} here which are only
		   valid for utimensat. */
		if (times[0].tv_usec >= 1000000 || times[0].tv_usec < 0 ||
		    times[1].tv_usec >= 1000000 || times[1].tv_usec < 0)
			return -EINVAL;

		tstimes[0].tv_sec = times[0].tv_sec;
		tstimes[0].tv_nsec = 1000 * times[0].tv_usec;
		tstimes[1].tv_sec = times[1].tv_sec;
		tstimes[1].tv_nsec = 1000 * times[1].tv_usec;
	}

	return do_utimes(dfd, filename, utimes ? tstimes : NULL, 0);
}
/* utimes(2) is futimesat(2) anchored at the current working directory. */
SYSCALL_DEFINE2(utimes, char __user *, filename,
		struct timeval __user *, utimes)
{
	return sys_futimesat(AT_FDCWD, filename, utimes);
}
| gpl-2.0 |
vikrant82/t320_kernel | net/mac80211/spectmgmt.c | 7831 | 2891 | /*
* spectrum management
*
* Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2008, Intel Corporation
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "wme.h"
/*
 * Build and transmit a spectrum-management measurement report that refuses
 * the measurement request carried in @request_ie.  The report echoes the
 * requester's dialog token and measurement token/type with the REFUSED
 * mode bit set.
 */
static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
					struct ieee80211_msrment_ie *request_ie,
					const u8 *da, const u8 *bssid,
					u8 dialog_token)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *msr_report;

	skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
				sizeof(struct ieee80211_msrment_ie));
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom);
	/* 24 bytes: the fixed management frame header. */
	msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
	memset(msr_report, 0, 24);
	memcpy(msr_report->da, da, ETH_ALEN);
	memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
	memcpy(msr_report->bssid, bssid, ETH_ALEN);
	msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						IEEE80211_STYPE_ACTION);

	/* Action category/code byte plus the measurement report body. */
	skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
	msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
	msr_report->u.action.u.measurement.action_code =
				WLAN_ACTION_SPCT_MSR_RPRT;
	msr_report->u.action.u.measurement.dialog_token = dialog_token;

	msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
	msr_report->u.action.u.measurement.length =
			sizeof(struct ieee80211_msrment_ie);

	memset(&msr_report->u.action.u.measurement.msr_elem, 0,
	       sizeof(struct ieee80211_msrment_ie));
	msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
	/* Refuse: we echo the request but flag it as declined. */
	msr_report->u.action.u.measurement.msr_elem.mode |=
			IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
	msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;

	ieee80211_tx_skb(sdata, skb);
}
/*
 * Handle an incoming spectrum-management measurement request by refusing
 * it (see the rationale below).
 */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
				       struct ieee80211_mgmt *mgmt,
				       size_t len)
{
	/*
	 * Ignoring measurement request is spec violation.
	 * Mandatory measurements must be reported optional
	 * measurements might be refused or reported incapable
	 * For now just refuse
	 * TODO: Answer basic measurement as unmeasured
	 */
	ieee80211_send_refuse_measurement_request(sdata,
			&mgmt->u.action.u.measurement.msr_elem,
			mgmt->sa, mgmt->bssid,
			mgmt->u.action.u.measurement.dialog_token);
}
| gpl-2.0 |
percy-g2/bbbandroid-kernel | fs/f2fs/xattr.c | 152 | 10881 | /*
* fs/f2fs/xattr.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Portions of this code from linux/fs/ext2/xattr.c
*
* Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
*
* Fix by Harrison Xing <harrison@mountainviewdata.com>.
* Extended attributes for symlinks and special files added per
* suggestion of Luka Renko <luka.renko@hermes.si>.
* xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
* Red Hat Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/rwsem.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "xattr.h"
/*
 * ->list handler for the "user." and "trusted." namespaces: emit the
 * fully-prefixed, NUL-terminated attribute name into @list (when it fits)
 * and return the space the entry needs.  Negative errno on denial.
 */
static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list,
		size_t list_size, const char *name, size_t name_len, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
	int total_len, prefix_len = 0;
	const char *prefix = NULL;

	switch (type) {
	case F2FS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
		prefix = XATTR_USER_PREFIX;
		prefix_len = XATTR_USER_PREFIX_LEN;
		break;
	case F2FS_XATTR_INDEX_TRUSTED:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		prefix = XATTR_TRUSTED_PREFIX;
		prefix_len = XATTR_TRUSTED_PREFIX_LEN;
		break;
	default:
		return -EINVAL;
	}

	/* prefix + name + trailing NUL */
	total_len = prefix_len + name_len + 1;
	if (list && total_len <= list_size) {
		memcpy(list, prefix, prefix_len);
		memcpy(list + prefix_len, name, name_len);
		list[prefix_len + name_len] = '\0';
	}
	return total_len;
}
/*
 * ->get handler shared by the "user." and "trusted." namespaces: enforce
 * the per-namespace access policy, then fetch the value from the xattr
 * node via f2fs_getxattr().
 */
static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name,
		void *buffer, size_t size, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);

	if (type == F2FS_XATTR_INDEX_USER) {
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
	} else if (type == F2FS_XATTR_INDEX_TRUSTED) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	} else {
		return -EINVAL;
	}

	/* An empty attribute name is never valid. */
	if (*name == '\0')
		return -EINVAL;

	return f2fs_getxattr(dentry->d_inode, type, name, buffer, size);
}
/*
 * ->set handler shared by the "user." and "trusted." namespaces.
 * NOTE(review): the @flags argument (XATTR_CREATE/XATTR_REPLACE) is not
 * honoured here — confirm whether that is intentional for this version.
 */
static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
		const void *value, size_t size, int flags, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);

	switch (type) {
	case F2FS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case F2FS_XATTR_INDEX_TRUSTED:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		break;
	default:
		return -EINVAL;
	}
	/* An empty attribute name is never valid. */
	if (strcmp(name, "") == 0)
		return -EINVAL;

	return f2fs_setxattr(dentry->d_inode, type, name, value, size);
}
/*
 * ->list handler for the f2fs-private "system.advise" attribute: report
 * the fixed attribute name (with its terminating NUL) and copy it out
 * when the caller's buffer is large enough.
 */
static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list,
		size_t list_size, const char *name, size_t name_len, int type)
{
	size_t needed;

	if (type != F2FS_XATTR_INDEX_ADVISE)
		return 0;

	needed = strlen(F2FS_SYSTEM_ADVISE_PREFIX) + 1;	/* include NUL */
	if (list != NULL && needed <= list_size)
		memcpy(list, F2FS_SYSTEM_ADVISE_PREFIX, needed);
	return needed;
}
/*
 * ->get handler for "system.advise": return the one-byte i_advise hint.
 * Returns the value size (1) so callers can size their buffer.
 */
static int f2fs_xattr_advise_get(struct dentry *dentry, const char *name,
		void *buffer, size_t size, int type)
{
	struct inode *inode = dentry->d_inode;

	if (strcmp(name, "") != 0)
		return -EINVAL;

	/*
	 * getxattr(2) may legally pass a NULL buffer to query the value
	 * size; only dereference it when one was actually supplied.
	 */
	if (buffer)
		*((char *)buffer) = F2FS_I(inode)->i_advise;
	return sizeof(char);
}
/*
 * ->set handler for "system.advise": OR the supplied byte into the
 * inode's advise hints.  Owner (or CAP_FOWNER) only; value is required.
 */
static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
		const void *value, size_t size, int flags, int type)
{
	struct inode *inode = dentry->d_inode;

	if (strcmp(name, "") != 0)
		return -EINVAL;
	if (!inode_owner_or_capable(inode))
		return -EPERM;
	if (value == NULL)
		return -EINVAL;

	/* Hints accumulate — bits are never cleared by this interface. */
	F2FS_I(inode)->i_advise |= *(char *)value;
	return 0;
}
/* Handler for the "user." xattr namespace. */
const struct xattr_handler f2fs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= F2FS_XATTR_INDEX_USER,
	.list	= f2fs_xattr_generic_list,
	.get	= f2fs_xattr_generic_get,
	.set	= f2fs_xattr_generic_set,
};

/* Handler for the "trusted." xattr namespace (CAP_SYS_ADMIN only). */
const struct xattr_handler f2fs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= F2FS_XATTR_INDEX_TRUSTED,
	.list	= f2fs_xattr_generic_list,
	.get	= f2fs_xattr_generic_get,
	.set	= f2fs_xattr_generic_set,
};

/* Handler for the f2fs-private "system.advise" attribute. */
const struct xattr_handler f2fs_xattr_advise_handler = {
	.prefix = F2FS_SYSTEM_ADVISE_PREFIX,
	.flags	= F2FS_XATTR_INDEX_ADVISE,
	.list   = f2fs_xattr_advise_list,
	.get    = f2fs_xattr_advise_get,
	.set    = f2fs_xattr_advise_set,
};

/* Lookup table indexed by the on-disk name_index of each entry. */
static const struct xattr_handler *f2fs_xattr_handler_map[] = {
	[F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	[F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &f2fs_xattr_acl_access_handler,
	[F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &f2fs_xattr_acl_default_handler,
#endif
	[F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler,
	[F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
};

/* NULL-terminated list registered with the VFS via sb->s_xattr. */
const struct xattr_handler *f2fs_xattr_handlers[] = {
	&f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	&f2fs_xattr_acl_access_handler,
	&f2fs_xattr_acl_default_handler,
#endif
	&f2fs_xattr_trusted_handler,
	&f2fs_xattr_advise_handler,
	NULL,
};
/*
 * Map an on-disk name_index to its xattr handler, or NULL when the index
 * is out of range or has no handler registered (e.g. ACLs disabled).
 */
static inline const struct xattr_handler *f2fs_xattr_handler(int name_index)
{
	if (name_index <= 0 || name_index >= ARRAY_SIZE(f2fs_xattr_handler_map))
		return NULL;
	return f2fs_xattr_handler_map[name_index];
}
/*
 * Look up the attribute (@name_index, @name) in the inode's xattr node
 * page and copy its value into @buffer when one is supplied.  Returns the
 * value length, or a negative errno (-ENODATA when absent, -ERANGE when
 * the buffer is too small).
 */
int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
		void *buffer, size_t buffer_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_xattr_entry *entry;
	struct page *page;
	void *base_addr;
	int error = 0, found = 0;
	size_t value_len, name_len;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);

	/* No xattr node allocated means no attributes at all. */
	if (!fi->i_xattr_nid)
		return -ENODATA;

	page = get_node_page(sbi, fi->i_xattr_nid);
	/* Propagate node-page read errors (f2fs_setxattr does the same). */
	if (IS_ERR(page))
		return PTR_ERR(page);
	base_addr = page_address(page);

	list_for_each_xattr(entry, base_addr) {
		if (entry->e_name_index != name_index)
			continue;
		if (entry->e_name_len != name_len)
			continue;
		if (!memcmp(entry->e_name, name, name_len)) {
			found = 1;
			break;
		}
	}
	if (!found) {
		error = -ENODATA;
		goto cleanup;
	}

	value_len = le16_to_cpu(entry->e_value_size);

	if (buffer && value_len > buffer_size) {
		error = -ERANGE;
		goto cleanup;
	}

	if (buffer) {
		/* The value is stored immediately after the name bytes. */
		char *pval = entry->e_name + entry->e_name_len;
		memcpy(buffer, pval, value_len);
	}
	error = value_len;

cleanup:
	f2fs_put_page(page, 1);
	return error;
}
/*
 * Emit all attribute names of @dentry's inode into @buffer via each
 * entry's handler->list callback.  Returns the number of bytes written
 * (or required, when @buffer is NULL), or a negative errno.
 */
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_xattr_entry *entry;
	struct page *page;
	void *base_addr;
	int error = 0;
	size_t rest = buffer_size;

	/* No xattr node allocated means an empty list. */
	if (!fi->i_xattr_nid)
		return 0;

	page = get_node_page(sbi, fi->i_xattr_nid);
	/* Propagate node-page read errors (f2fs_setxattr does the same). */
	if (IS_ERR(page))
		return PTR_ERR(page);
	base_addr = page_address(page);

	list_for_each_xattr(entry, base_addr) {
		const struct xattr_handler *handler =
			f2fs_xattr_handler(entry->e_name_index);
		size_t size;

		/* Skip entries whose namespace is unknown or disabled. */
		if (!handler)
			continue;

		size = handler->list(dentry, buffer, rest, entry->e_name,
				entry->e_name_len, handler->flags);
		if (buffer && size > rest) {
			error = -ERANGE;
			goto cleanup;
		}

		if (buffer)
			buffer += size;
		rest -= size;
	}
	error = buffer_size - rest;

cleanup:
	f2fs_put_page(page, 1);
	return error;
}
/*
 * Create, update or (when @value is NULL) remove the attribute
 * (@name_index, @name) in the inode's xattr node page, allocating the
 * node on first use.  Runs under the NODE_NEW op mutex.  Returns 0 or a
 * negative errno.
 */
int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
			const void *value, size_t value_len)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_xattr_header *header = NULL;
	struct f2fs_xattr_entry *here, *last;
	struct page *page;
	void *base_addr;
	int error, found, free, newsize;
	size_t name_len;
	char *pval;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);

	/* A NULL value means "remove this attribute". */
	if (value == NULL)
		value_len = 0;

	if (name_len > 255 || value_len > MAX_VALUE_LEN)
		return -ERANGE;

	f2fs_balance_fs(sbi);

	mutex_lock_op(sbi, NODE_NEW);
	if (!fi->i_xattr_nid) {
		/* Allocate new attribute block */
		struct dnode_of_data dn;

		if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
			mutex_unlock_op(sbi, NODE_NEW);
			return -ENOSPC;
		}
		set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
		mark_inode_dirty(inode);

		page = new_node_page(&dn, XATTR_NODE_OFFSET);
		if (IS_ERR(page)) {
			/* Roll back the nid reservation on failure. */
			alloc_nid_failed(sbi, fi->i_xattr_nid);
			fi->i_xattr_nid = 0;
			mutex_unlock_op(sbi, NODE_NEW);
			return PTR_ERR(page);
		}

		alloc_nid_done(sbi, fi->i_xattr_nid);
		base_addr = page_address(page);
		header = XATTR_HDR(base_addr);
		header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
		header->h_refcount = cpu_to_le32(1);
	} else {
		/* The inode already has an extended attribute block. */
		page = get_node_page(sbi, fi->i_xattr_nid);
		if (IS_ERR(page)) {
			mutex_unlock_op(sbi, NODE_NEW);
			return PTR_ERR(page);
		}

		base_addr = page_address(page);
		header = XATTR_HDR(base_addr);
	}

	if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
		error = -EIO;
		goto cleanup;
	}

	/* find entry with wanted name. */
	found = 0;
	list_for_each_xattr(here, base_addr) {
		if (here->e_name_index != name_index)
			continue;
		if (here->e_name_len != name_len)
			continue;
		if (!memcmp(here->e_name, name, name_len)) {
			found = 1;
			break;
		}
	}

	/* Walk to the terminator entry; new data is appended there. */
	last = here;
	while (!IS_XATTR_LAST_ENTRY(last))
		last = XATTR_NEXT_ENTRY(last);

	newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) +
			name_len + value_len);

	/* 1. Check space */
	if (value) {
		/* If value is NULL, it is remove operation.
		 * In case of update operation, we calculate free.
		 */
		free = MIN_OFFSET - ((char *)last - (char *)header);
		if (found)
			free = free - ENTRY_SIZE(here);

		if (free < newsize) {
			error = -ENOSPC;
			goto cleanup;
		}
	}

	/* 2. Remove old entry */
	if (found) {
		/* If entry is found, remove old entry.
		 * If not found, remove operation is not needed.
		 */
		struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
		int oldsize = ENTRY_SIZE(here);

		/* Close the gap, then clear the freed tail bytes. */
		memmove(here, next, (char *)last - (char *)next);
		last = (struct f2fs_xattr_entry *)((char *)last - oldsize);
		memset(last, 0, oldsize);
	}

	/* 3. Write new entry */
	if (value) {
		/* Before we come here, old entry is removed.
		 * We just write new entry. */
		memset(last, 0, newsize);
		last->e_name_index = name_index;
		last->e_name_len = name_len;
		memcpy(last->e_name, name, name_len);
		pval = last->e_name + name_len;
		memcpy(pval, value, value_len);
		last->e_value_size = cpu_to_le16(value_len);
	}

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	/* Apply a pending ACL-driven mode change, if any. */
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		inode->i_mode = fi->i_acl_mode;
		inode->i_ctime = CURRENT_TIME;
		clear_inode_flag(fi, FI_ACL_MODE);
	}

	f2fs_write_inode(inode, NULL);
	mutex_unlock_op(sbi, NODE_NEW);

	return 0;
cleanup:
	f2fs_put_page(page, 1);
	mutex_unlock_op(sbi, NODE_NEW);
	return error;
}
| gpl-2.0 |
cleech/linux | arch/powerpc/kernel/ptrace/ptrace-adv.c | 152 | 13940 | // SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include "ptrace-decl.h"
/*
 * Arm hardware single-stepping for @task: request the instruction-complete
 * debug event (IC) with internal debug mode (IDM), clearing any pending
 * branch-taken mode, and enable debug interrupts in the MSR.
 */
void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
		task->thread.debug.dbcr0 =
			(task->thread.debug.dbcr0 & ~DBCR0_BT) |
			DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
/*
 * Arm branch-taken ("block") stepping for @task: trap on the next taken
 * branch instead of every instruction.
 */
void user_enable_block_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
		task->thread.debug.dbcr0 &= ~DBCR0_IC;
		/* NOTE(review): plain `=` (not `|=`) clobbers any other dbcr0
		 * bits, unlike user_enable_single_step() — confirm this is
		 * intentional. */
		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
		regs->msr |= MSR_DE;
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
/*
 * Disarm both single-step (IC) and block-step (BT) for @task, and drop
 * internal debug mode entirely when no other debug events remain armed.
 */
void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
{
dbginfo->version = 1;
dbginfo->num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
dbginfo->num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
dbginfo->num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
dbginfo->data_bp_alignment = 4;
dbginfo->sizeof_condition = 4;
dbginfo->features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
PPC_DEBUG_FEATURE_INSN_BP_MASK;
if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_DAC_RANGE))
dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_RANGE |
PPC_DEBUG_FEATURE_DATA_BP_MASK;
}
/*
 * PTRACE_GET_DEBUGREG: only register 0 (DAC1) exists on these cores,
 * so any other index is rejected.
 */
int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
			unsigned long __user *datalp)
{
	/* We only support one DABR and no IABRS at the moment */
	if (addr != 0)
		return -EINVAL;
	return put_user(child->thread.debug.dac1, datalp);
}
/*
 * PTRACE_SET_DEBUGREG: program DAC1 as a data watchpoint.  The low bits
 * of @data select read (bit 0) and/or write (bit 1) triggering; a zero
 * @data clears the watchpoint.
 */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* NOTE(review): these locals appear unused in this variant of the
	 * function — presumably left over from the ptrace split; confirm. */
	int ret;
	struct thread_struct *thread = &task->thread;
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
	 * For embedded processors we support one DAC and no IAC's at the
	 * moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

	/* As described above, it was assumed 3 bits were passed with the data
	 * address, but we will assume only the mode bits will be passed
	 * as to not cause alignment restrictions for DAC-based processors.
	 */

	/* DAC's hold the whole address without any mode flags */
	task->thread.debug.dac1 = data & ~0x3UL;

	if (task->thread.debug.dac1 == 0) {
		/* Clearing: drop DAC1 triggers, and IDM/MSR_DE if idle. */
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */
	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */
	task->thread.debug.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0 accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
	return 0;
}
/*
 * Allocate IAC (instruction address compare) slot(s) for @bp_info.
 * Range breakpoints consume an adjacent slot pair (1+2 or 3+4); exact
 * breakpoints take a single slot.  Returns the 1-based slot number used,
 * or a negative errno when none is free or the addresses are invalid.
 */
static long set_instruction_bp(struct task_struct *child,
			       struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

	/* A configured range implicitly occupies the pair's second slot. */
	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC regsisters */
		if (!slot1_in_use && !slot2_in_use) {
			slot = 1;
			child->thread.debug.iac1 = bp_info->addr;
			child->thread.debug.iac2 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.iac4 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else {
			return -ENOSPC;
		}
	} else {
		/* We only need one. If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || slot3_in_use == slot4_in_use) {
				slot = 1;
				child->thread.debug.iac1 = bp_info->addr;
				child->thread.debug.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.debug.iac2 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.debug.iac4 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
		} else {
			return -ENOSPC;
		}
	}
out:
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}
/*
 * Release the IAC breakpoint in @slot (1-4).  Deleting the first slot of
 * a range pair tears down the whole range; deleting the second slot of an
 * active range is rejected with -EINVAL.
 */
static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.debug.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.debug.iac1 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.debug.iac2 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.debug.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.debug.iac3 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.debug.iac4 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Program a DAC (data address compare) watchpoint from @bp_info, using
 * DAC1 when free, else DAC2.  Optional DVC byte-enable/condition matching
 * is configured when the core provides DVC registers.  Returns the
 * ptrace slot id (internal slot + 4) or a negative errno.
 */
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	/* Byte enables only make sense together with a condition mode. */
	if (byte_enable && condition_mode == 0)
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else {
		return -ENOSPC;
	}
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	/* DAC slots are reported to userspace offset past the 4 IAC slots. */
	return slot + 4;
}
/*
 * Release the DAC watchpoint in internal @slot (1 or 2).  Deleting DAC1
 * also tears down a DAC1/DAC2 range; deleting DAC2 while it is half of a
 * range is rejected.  Any associated DVC state is cleared.
 */
static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.debug.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.debug.dac2 = 0;
			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc2 = 0;
#endif
		child->thread.debug.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else {
		return -EINVAL;
	}

	return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/*
 * Program a DAC1/DAC2 pair as a range or mask watchpoint.  Consumes both
 * DAC registers; returns the fixed slot id 5 on success or a negative
 * errno.  DVC conditions cannot be combined with ranges.
 */
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	/* Both DACs must be free; a range always uses the pair. */
	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
/*
 * Validate a PPC_PTRACE_SETHWDEBUG request and dispatch it to the
 * appropriate low-level setter.  Returns a positive slot handle on
 * success or a negative errno.
 */
long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	if (bp_info->version != 1)
		return -ENOTSUPP;

	/* Reject empty or unknown trigger types, modes and condition bits. */
	if (bp_info->trigger_type == 0)
		return -EINVAL;
	if (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				      PPC_BREAKPOINT_TRIGGER_RW))
		return -EINVAL;
	if (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK)
		return -EINVAL;
	if (bp_info->condition_mode & ~(PPC_BREAKPOINT_CONDITION_MODE |
					PPC_BREAKPOINT_CONDITION_BE_ALL))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		/* Instruction breakpoints must be execute-only, unconditional. */
		if (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE ||
		    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}

	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
}
/*
 * Remove the hardware breakpoint identified by @data (handles 1-4 are
 * instruction slots, handles above 4 are DAC slots).  When the last
 * event goes away, drop internal-debug mode entirely.
 */
long ppc_del_hwdebug(struct task_struct *child, long data)
{
	int rc = (data <= 4) ? del_instruction_bp(child, (int)data)
			     : del_dac(child, (int)data - 4);

	if (rc)
		return rc;

	if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
				child->thread.debug.dbcr1)) {
		child->thread.debug.dbcr0 &= ~DBCR0_IDM;
		child->thread.regs->msr &= ~MSR_DE;
	}
	return 0;
}
/*
* Quick & dirty crypto testing module.
*
* This will only exist until we have a better testing mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) 2007 Nokia Siemens Networks
*
* Updated RFC4106 AES-GCM testing.
* Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
* Adrian Hoban <adrian.hoban@intel.com>
* Gabriele Paoloni <gabriele.paoloni@intel.com>
* Tadeusz Struk (tadeusz.struk@intel.com)
* Copyright (c) 2010, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"
/*
 * Need slab memory for testing (size in number of pages).
 */
#define TVMEMSIZE	4

/*
 * Used by test_cipher_speed()
 */
#define ENCRYPT 1
#define DECRYPT 0

/* Largest digest the ahash speed tests will buffer (bytes). */
#define MAX_DIGEST_SIZE		64

/*
 * return a string with the driver name
 */
#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))

/*
 * Used by test_cipher_speed()
 */
/* Test duration in seconds; 0 selects cycle counting instead of jiffies. */
static unsigned int sec;

/*
 * NOTE(review): these look like module parameters, presumably wired up via
 * module_param() further down the file (not visible here) — confirm.
 */
static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;

/* Scratch pages shared by the speed tests (TVMEMSIZE pages). */
static char *tvmem[TVMEMSIZE];

/* Algorithm names probed by test_available(); NULL-terminated. */
static char *check[] = {
	"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256",
	"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
	"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
	"lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
	NULL
};

/* Completion and final status of one asynchronous crypto request. */
struct tcrypt_result {
	struct completion completion;
	int err;
};
/*
 * Async completion callback: record the final status and wake the waiter.
 * A backlogged request signals -EINPROGRESS first; that call is ignored
 * and only the real completion is reported.
 */
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *result;

	if (err == -EINPROGRESS)
		return;

	result = req->data;
	result->err = err;
	complete(&result->completion);
}
/*
 * Run one AEAD operation to completion.  @ret is the immediate return of
 * crypto_aead_encrypt()/decrypt(); for async (-EINPROGRESS) or backlogged
 * (-EBUSY) requests, block until tcrypt_complete() fires and return the
 * recorded status.
 *
 * Fix: wait non-interruptibly, matching do_one_ahash_op() and
 * do_one_acipher_op().  The old wait_for_completion_interruptible()
 * returned early on a signal, leaving the request in flight while its
 * buffers were reused for the next iteration.
 */
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct tcrypt_result *tr = req->base.data;

		wait_for_completion(&tr->completion);
		reinit_completion(&tr->completion);
		ret = tr->err;
	}

	return ret;
}
/*
 * Run AEAD operations of @blen bytes for @secs wall-clock seconds and
 * report the achieved operation count.  Returns 0 or the first error.
 */
static int test_aead_jiffies(struct aead_request *req, int enc,
			     int blen, int secs)
{
	unsigned long deadline = jiffies + secs * HZ;
	int count = 0;
	int rc;

	while (time_before(jiffies, deadline)) {
		rc = enc ? do_one_aead_op(req, crypto_aead_encrypt(req)) :
			   do_one_aead_op(req, crypto_aead_decrypt(req));
		if (rc)
			return rc;
		count++;
	}

	printk("%d operations in %d seconds (%ld bytes)\n",
	       count, secs, (long)count * blen);
	return 0;
}
/*
 * Measure the cycle cost of one AEAD operation of @blen bytes: four
 * warm-up rounds, then eight timed rounds averaged (with IRQs off so the
 * timestamps are not perturbed).  Returns 0 or the first error.
 */
static int test_aead_cycles(struct aead_request *req, int enc, int blen)
{
	unsigned long total = 0;
	int rc = 0;
	int i;

	local_irq_disable();

	/* Warm-up run. */
	for (i = 0; i < 4 && !rc; i++)
		rc = enc ? do_one_aead_op(req, crypto_aead_encrypt(req)) :
			   do_one_aead_op(req, crypto_aead_decrypt(req));

	/* The real thing. */
	for (i = 0; i < 8 && !rc; i++) {
		cycles_t t0 = get_cycles();

		rc = enc ? do_one_aead_op(req, crypto_aead_encrypt(req)) :
			   do_one_aead_op(req, crypto_aead_decrypt(req));
		if (!rc)
			total += get_cycles() - t0;
	}

	local_irq_enable();

	if (rc == 0)
		printk("1 operation in %lu cycles (%d bytes)\n",
		       (total + 4) / 8, blen);

	return rc;
}
/* Buffer-length tables for the speed tests; each list is zero-terminated. */
static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };

/* Backing pages per test buffer, and the largest IV buffer we allocate. */
#define XBUFSIZE	8
#define MAX_IVLEN	32
/*
 * Allocate XBUFSIZE free pages into @buf.  On failure, release the pages
 * obtained so far and return -ENOMEM; on success return 0.
 */
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
	int n;

	for (n = 0; n < XBUFSIZE; n++) {
		buf[n] = (void *)__get_free_page(GFP_KERNEL);
		if (!buf[n]) {
			/* Unwind every page allocated before the failure. */
			while (n-- > 0)
				free_page((unsigned long)buf[n]);
			return -ENOMEM;
		}
	}

	return 0;
}
/* Release the XBUFSIZE pages previously obtained by testmgr_alloc_buf(). */
static void testmgr_free_buf(char *buf[XBUFSIZE])
{
	int n;

	for (n = 0; n < XBUFSIZE; n++)
		free_page((unsigned long)buf[n]);
}
/*
 * Build a scatterlist over @buflen bytes of @xbuf for the AEAD speed test.
 * sg[0] is left for the caller to point at the associated data; the
 * payload occupies sg[1..].  Buffers larger than XBUFSIZE pages are
 * clamped to XBUFSIZE full pages.
 *
 * Fix: when @buflen is an exact multiple of PAGE_SIZE the old code still
 * carved off a tail entry of length 0 and dropped one full page of data.
 * Only split off a partial tail entry when a remainder actually exists.
 */
static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
			 unsigned int buflen)
{
	int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
	int k, rem;

	if (np > XBUFSIZE) {
		rem = PAGE_SIZE;
		np = XBUFSIZE;
	} else {
		rem = buflen % PAGE_SIZE;
	}

	sg_init_table(sg, np + 1);
	if (rem)
		np--;
	for (k = 0; k < np; k++)
		sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);

	if (rem)
		sg_set_buf(&sg[k + 1], xbuf[k], rem);
}
/*
 * Benchmark one AEAD algorithm over every (key size, block size)
 * combination from @keysize (zero-terminated) and aead_sizes[].
 *
 * @enc:       ENCRYPT or DECRYPT
 * @secs:      if non-zero, time in wall-clock seconds; otherwise cycles
 * @template/@tcount: optional fixed keys per key length
 * @authsize:  authentication tag size to configure
 * @aad_size:  associated data length (must fit in one page)
 *
 * Fix: the return value of crypto_aead_setkey() was immediately
 * overwritten by crypto_aead_setauthsize(), silently discarding setkey
 * failures; each call is now checked individually, and the tfm flags are
 * printed before being cleared rather than after.
 */
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
			    struct aead_speed_template *template,
			    unsigned int tcount, u8 authsize,
			    unsigned int aad_size, u8 *keysize)
{
	unsigned int i, j;
	struct crypto_aead *tfm;
	int ret = -ENOMEM;
	const char *key;
	struct aead_request *req;
	struct scatterlist *sg;
	struct scatterlist *sgout;
	const char *e;
	void *assoc;
	char *iv;
	char *xbuf[XBUFSIZE];
	char *xoutbuf[XBUFSIZE];
	char *axbuf[XBUFSIZE];
	unsigned int *b_size;
	unsigned int iv_len;
	struct tcrypt_result result;

	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
	if (!iv)
		return;

	if (aad_size >= PAGE_SIZE) {
		pr_err("associate data length (%u) too big\n", aad_size);
		goto out_noxbuf;
	}

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	if (testmgr_alloc_buf(xbuf))
		goto out_noxbuf;
	if (testmgr_alloc_buf(axbuf))
		goto out_noaxbuf;
	if (testmgr_alloc_buf(xoutbuf))
		goto out_nooutbuf;

	/* 9 entries each (1 AAD + up to 8 data pages) for input and output. */
	sg = kmalloc(sizeof(*sg) * 9 * 2, GFP_KERNEL);
	if (!sg)
		goto out_nosg;
	sgout = &sg[9];

	tfm = crypto_alloc_aead(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		goto out_notfm;
	}

	init_completion(&result.completion);
	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
	       get_driver_name(crypto_aead, tfm), e);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("alg: aead: Failed to allocate request for %s\n",
		       algo);
		goto out_noreq;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tcrypt_complete, &result);

	i = 0;
	do {
		b_size = aead_sizes;
		do {
			assoc = axbuf[0];
			memset(assoc, 0xff, aad_size);

			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for tvmem (%lu)\n",
				       *keysize + *b_size,
				       TVMEMSIZE * PAGE_SIZE);
				goto out;
			}

			/* Use the template key matching this length, if any. */
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_aead_clear_flags(tfm, ~0);

			ret = crypto_aead_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
				       crypto_aead_get_flags(tfm));
				goto out;
			}

			ret = crypto_aead_setauthsize(tfm, authsize);
			if (ret) {
				pr_err("setauthsize() failed\n");
				goto out;
			}

			iv_len = crypto_aead_ivsize(tfm);
			if (iv_len)
				memset(iv, 0xff, iv_len);

			printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
					i, *keysize * 8, *b_size);

			memset(tvmem[0], 0xff, PAGE_SIZE);

			/* Encryption output grows by the tag; size buffers accordingly. */
			sg_init_aead(sg, xbuf,
				     *b_size + (enc ? authsize : 0));

			sg_init_aead(sgout, xoutbuf,
				     *b_size + (enc ? authsize : 0));

			sg_set_buf(&sg[0], assoc, aad_size);
			sg_set_buf(&sgout[0], assoc, aad_size);

			aead_request_set_crypt(req, sg, sgout, *b_size, iv);
			aead_request_set_ad(req, aad_size);

			if (secs)
				ret = test_aead_jiffies(req, enc, *b_size,
							secs);
			else
				ret = test_aead_cycles(req, enc, *b_size);

			if (ret) {
				pr_err("%s() failed return code=%d\n", e, ret);
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out:
	aead_request_free(req);
out_noreq:
	crypto_free_aead(tfm);
out_notfm:
	kfree(sg);
out_nosg:
	testmgr_free_buf(xoutbuf);
out_nooutbuf:
	testmgr_free_buf(axbuf);
out_noaxbuf:
	testmgr_free_buf(xbuf);
out_noxbuf:
	kfree(iv);
	return;
}
/* Map every tvmem page into @sg and fill each with a 0xff pattern. */
static void test_hash_sg_init(struct scatterlist *sg)
{
	int k;

	sg_init_table(sg, TVMEMSIZE);
	for (k = 0; k < TVMEMSIZE; k++) {
		memset(tvmem[k], 0xff, PAGE_SIZE);
		sg_set_buf(&sg[k], tvmem[k], PAGE_SIZE);
	}
}
/*
 * Run one ahash operation to completion: for async (-EINPROGRESS) or
 * backlogged (-EBUSY) requests, block until the callback fires and
 * return the recorded status; otherwise pass @ret straight through.
 */
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
	struct tcrypt_result *tr;

	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	tr = req->base.data;
	wait_for_completion(&tr->completion);
	reinit_completion(&tr->completion);
	return tr->err;
}
/* Per-request state for the multibuffer hash speed test. */
struct test_mb_ahash_data {
	struct scatterlist sg[TVMEMSIZE];	/* input mapping over tvmem pages */
	char result[64];			/* digest output buffer */
	struct ahash_request *req;		/* request bound to this slot */
	struct tcrypt_result tresult;		/* completion + final status */
	char *xbuf[XBUFSIZE];			/* pages from testmgr_alloc_buf() */
};
/*
 * Benchmark a (multibuffer-capable) ahash by firing 8 digest requests
 * back-to-back and timing until all of them complete.
 *
 * NOTE(review): the parameter @sec shadows the file-scope 'sec' variable,
 * and is not actually used below — timing is always in cycles here.
 */
static void test_mb_ahash_speed(const char *algo, unsigned int sec,
				struct hash_speed *speed)
{
	struct test_mb_ahash_data *data;
	struct crypto_ahash *tfm;
	unsigned long start, end;
	unsigned long cycles;
	unsigned int i, j, k;
	int ret;

	/*
	 * kzalloc() zeroes the array, so the cleanup loops below see NULL
	 * req pointers / page pointers for slots that were never set up.
	 */
	data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
	if (!data)
		return;

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		goto free_data;
	}

	/* Set up all 8 request slots: pages, completion, request, sg. */
	for (i = 0; i < 8; ++i) {
		if (testmgr_alloc_buf(data[i].xbuf))
			goto out;

		init_completion(&data[i].tresult.completion);

		data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!data[i].req) {
			pr_err("alg: hash: Failed to allocate request for %s\n",
			       algo);
			goto out;
		}

		ahash_request_set_callback(data[i].req, 0,
					   tcrypt_complete, &data[i].tresult);

		test_hash_sg_init(data[i].sg);
	}

	pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
		get_driver_name(crypto_ahash, tfm));

	for (i = 0; speed[i].blen != 0; i++) {
		/* For some reason this only tests digests. */
		if (speed[i].blen != speed[i].plen)
			continue;

		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
			pr_err("template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
			goto out;
		}

		/* NOTE(review): setkey() return value is ignored here. */
		if (speed[i].klen)
			crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);

		for (k = 0; k < 8; k++)
			ahash_request_set_crypt(data[k].req, data[k].sg,
						data[k].result, speed[i].blen);

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen,
			speed[i].blen / speed[i].plen);

		start = get_cycles();

		/*
		 * Issue all 8 digests.  Async backends return -EINPROGRESS
		 * (completion fires later); for synchronous completions we
		 * complete() manually so the wait loop below is uniform.
		 * On a hard error, break with k = number of issued requests.
		 */
		for (k = 0; k < 8; k++) {
			ret = crypto_ahash_digest(data[k].req);
			if (ret == -EINPROGRESS) {
				ret = 0;
				continue;
			}

			if (ret)
				break;

			complete(&data[k].tresult.completion);
			data[k].tresult.err = 0;
		}

		/* Wait for every request that was actually issued. */
		for (j = 0; j < k; j++) {
			struct tcrypt_result *tr = &data[j].tresult;

			wait_for_completion(&tr->completion);
			if (tr->err)
				ret = tr->err;
		}

		end = get_cycles();
		cycles = end - start;
		pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
			cycles, cycles / (8 * speed[i].blen));

		if (ret) {
			pr_err("At least one hashing failed ret=%d\n", ret);
			break;
		}
	}

out:
	/*
	 * Assumes ahash_request_free(NULL) and freeing zeroed xbuf slots
	 * are no-ops for never-initialized entries — TODO confirm.
	 */
	for (k = 0; k < 8; ++k)
		ahash_request_free(data[k].req);

	for (k = 0; k < 8; ++k)
		testmgr_free_buf(data[k].xbuf);

	crypto_free_ahash(tfm);

free_data:
	kfree(data);
}
/*
 * Time whole-message digest() calls of @blen bytes for @secs seconds
 * and report the throughput.  Returns 0 or the first error.
 */
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
				     char *out, int secs)
{
	unsigned long deadline = jiffies + secs * HZ;
	int count = 0;
	int rc;

	while (time_before(jiffies, deadline)) {
		rc = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (rc)
			return rc;
		count++;
	}

	printk("%6u opers/sec, %9lu bytes/sec\n",
	       count / secs, ((long)count * blen) / secs);
	return 0;
}
/*
 * Time init/update.../final rounds hashing @blen bytes in @plen chunks
 * for @secs seconds.  Whole-block cases (@plen == @blen) use the
 * digest() fast path.  Returns 0 or the first error.
 */
static int test_ahash_jiffies(struct ahash_request *req, int blen,
			      int plen, char *out, int secs)
{
	unsigned long deadline;
	int count, off;
	int rc;

	if (plen == blen)
		return test_ahash_jiffies_digest(req, blen, out, secs);

	deadline = jiffies + secs * HZ;
	for (count = 0; time_before(jiffies, deadline); count++) {
		rc = do_one_ahash_op(req, crypto_ahash_init(req));
		if (rc)
			return rc;
		for (off = 0; off < blen; off += plen) {
			rc = do_one_ahash_op(req, crypto_ahash_update(req));
			if (rc)
				return rc;
		}
		/* we assume there is enough space in 'out' for the result */
		rc = do_one_ahash_op(req, crypto_ahash_final(req));
		if (rc)
			return rc;
	}

	pr_cont("%6u opers/sec, %9lu bytes/sec\n",
		count / secs, ((long)count * blen) / secs);
	return 0;
}
/*
 * Measure the cycle cost of a whole-message digest() of @blen bytes:
 * four warm-up rounds, then eight timed rounds averaged.
 */
static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
				    char *out)
{
	unsigned long total = 0;
	int rc = 0;
	int i;

	/* Warm-up run. */
	for (i = 0; i < 4 && !rc; i++)
		rc = do_one_ahash_op(req, crypto_ahash_digest(req));

	/* The real thing. */
	for (i = 0; i < 8 && !rc; i++) {
		cycles_t t0 = get_cycles();

		rc = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (!rc)
			total += get_cycles() - t0;
	}

	if (rc)
		return rc;

	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
		total / 8, total / (8 * blen));
	return 0;
}
/*
 * Measure the cycle cost of init/update.../final rounds hashing @blen
 * bytes in @plen chunks: four warm-up rounds, then eight timed rounds
 * averaged.  Whole-block cases defer to the digest() variant.
 */
static int test_ahash_cycles(struct ahash_request *req, int blen,
			     int plen, char *out)
{
	unsigned long total = 0;
	int i, off;
	int rc = 0;

	if (plen == blen)
		return test_ahash_cycles_digest(req, blen, out);

	/* Warm-up run. */
	for (i = 0; i < 4 && !rc; i++) {
		rc = do_one_ahash_op(req, crypto_ahash_init(req));
		for (off = 0; off < blen && !rc; off += plen)
			rc = do_one_ahash_op(req, crypto_ahash_update(req));
		if (!rc)
			rc = do_one_ahash_op(req, crypto_ahash_final(req));
	}

	/* The real thing. */
	for (i = 0; i < 8 && !rc; i++) {
		cycles_t t0 = get_cycles();

		rc = do_one_ahash_op(req, crypto_ahash_init(req));
		for (off = 0; off < blen && !rc; off += plen)
			rc = do_one_ahash_op(req, crypto_ahash_update(req));
		if (!rc)
			rc = do_one_ahash_op(req, crypto_ahash_final(req));
		if (!rc)
			total += get_cycles() - t0;
	}

	if (rc)
		return rc;

	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
		total / 8, total / (8 * blen));
	return 0;
}
/*
 * Shared driver for the ahash speed tests: allocate the transform (with
 * the caller's allocation @mask), build the scatterlist over tvmem, and
 * run every entry of the zero-terminated @speed table.
 */
static void test_ahash_speed_common(const char *algo, unsigned int secs,
				    struct hash_speed *speed, unsigned mask)
{
	struct scatterlist sg[TVMEMSIZE];
	struct tcrypt_result tresult;
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	char *output;
	int i, err;

	tfm = crypto_alloc_ahash(algo, 0, mask);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
		       algo, PTR_ERR(tfm));
		return;
	}

	printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
	       get_driver_name(crypto_ahash, tfm));

	if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) {
		pr_err("digestsize(%u) > %d\n", crypto_ahash_digestsize(tfm),
		       MAX_DIGEST_SIZE);
		goto out;
	}

	test_hash_sg_init(sg);
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("ahash request allocation failure\n");
		goto out;
	}

	init_completion(&tresult.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tcrypt_complete, &tresult);

	output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
	if (!output)
		goto out_nomem;

	for (i = 0; speed[i].blen != 0; i++) {
		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
			pr_err("template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
			break;
		}

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);

		ahash_request_set_crypt(req, sg, output, speed[i].plen);

		err = secs ? test_ahash_jiffies(req, speed[i].blen,
						speed[i].plen, output, secs)
			   : test_ahash_cycles(req, speed[i].blen,
					       speed[i].plen, output);

		if (err) {
			pr_err("hashing failed ret=%d\n", err);
			break;
		}
	}

	kfree(output);

out_nomem:
	ahash_request_free(req);

out:
	crypto_free_ahash(tfm);
}
/* Benchmark with no allocation mask, so async implementations may match. */
static void test_ahash_speed(const char *algo, unsigned int secs,
			     struct hash_speed *speed)
{
	test_ahash_speed_common(algo, secs, speed, 0);
}
/* Benchmark passing CRYPTO_ALG_ASYNC as the allocation mask. */
static void test_hash_speed(const char *algo, unsigned int secs,
			    struct hash_speed *speed)
{
	test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}
/*
 * Run one skcipher operation to completion: for async (-EINPROGRESS) or
 * backlogged (-EBUSY) requests, block until the callback fires and
 * return the recorded status; otherwise pass @ret straight through.
 */
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
	struct tcrypt_result *tr;

	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;

	tr = req->base.data;
	wait_for_completion(&tr->completion);
	reinit_completion(&tr->completion);
	return tr->err;
}
/*
 * Run skcipher operations of @blen bytes for @secs wall-clock seconds
 * and report the achieved operation count.  Returns 0 or the first error.
 */
static int test_acipher_jiffies(struct skcipher_request *req, int enc,
				int blen, int secs)
{
	unsigned long deadline = jiffies + secs * HZ;
	int count = 0;
	int rc;

	while (time_before(jiffies, deadline)) {
		rc = enc ? do_one_acipher_op(req,
					     crypto_skcipher_encrypt(req)) :
			   do_one_acipher_op(req,
					     crypto_skcipher_decrypt(req));
		if (rc)
			return rc;
		count++;
	}

	pr_cont("%d operations in %d seconds (%ld bytes)\n",
		count, secs, (long)count * blen);
	return 0;
}
/*
 * Measure the cycle cost of one skcipher operation of @blen bytes:
 * four warm-up rounds, then eight timed rounds averaged.
 */
static int test_acipher_cycles(struct skcipher_request *req, int enc,
			       int blen)
{
	unsigned long total = 0;
	int rc = 0;
	int i;

	/* Warm-up run. */
	for (i = 0; i < 4 && !rc; i++)
		rc = enc ? do_one_acipher_op(req,
					     crypto_skcipher_encrypt(req)) :
			   do_one_acipher_op(req,
					     crypto_skcipher_decrypt(req));

	/* The real thing. */
	for (i = 0; i < 8 && !rc; i++) {
		cycles_t t0 = get_cycles();

		rc = enc ? do_one_acipher_op(req,
					     crypto_skcipher_encrypt(req)) :
			   do_one_acipher_op(req,
					     crypto_skcipher_decrypt(req));
		if (!rc)
			total += get_cycles() - t0;
	}

	if (rc == 0)
		pr_cont("1 operation in %lu cycles (%d bytes)\n",
			(total + 4) / 8, blen);

	return rc;
}
/*
 * Benchmark one skcipher algorithm over every (key size, block size)
 * combination from @keysize (zero-terminated) and block_sizes[].
 *
 * @enc:    ENCRYPT or DECRYPT
 * @secs:   if non-zero, time in wall-clock seconds; otherwise cycle counts
 * @template/@tcount: optional fixed keys per key length
 * @async:  when false, CRYPTO_ALG_ASYNC is passed as the allocation mask
 *
 * NOTE(review): @ret is declared unsigned int although the crypto calls
 * return negative errnos; the truth tests still work, but the stored
 * value is the two's-complement wrap — confirm before printing it.
 */
static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
				struct cipher_speed_template *template,
				unsigned int tcount, u8 *keysize, bool async)
{
	unsigned int ret, i, j, k, iv_len;
	struct tcrypt_result tresult;
	const char *key;
	char iv[128];
	struct skcipher_request *req;
	struct crypto_skcipher *tfm;
	const char *e;
	u32 *b_size;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	init_completion(&tresult.completion);

	tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		return;
	}

	pr_info("\ntesting speed of async %s (%s) %s\n", algo,
		get_driver_name(crypto_skcipher, tfm), e);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
		       algo);
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      tcrypt_complete, &tresult);

	i = 0;
	do {
		b_size = block_sizes;

		do {
			struct scatterlist sg[TVMEMSIZE];

			/* Key and data together must fit in the tvmem pages. */
			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for "
				       "tvmem (%lu)\n", *keysize + *b_size,
				       TVMEMSIZE * PAGE_SIZE);
				goto out_free_req;
			}

			pr_info("test %u (%d bit key, %d byte blocks): ", i,
				*keysize * 8, *b_size);

			memset(tvmem[0], 0xff, PAGE_SIZE);

			/* set key, plain text and IV */
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_skcipher_clear_flags(tfm, ~0);

			ret = crypto_skcipher_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
				       crypto_skcipher_get_flags(tfm));
				goto out_free_req;
			}

			/*
			 * Build the scatterlist: entry 0 covers what is left
			 * of page 0 after the key bytes; further pages are
			 * mapped whole, and the tail entry takes the
			 * remainder (k counts the bytes still to map).
			 */
			k = *keysize + *b_size;
			sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));

			if (k > PAGE_SIZE) {
				sg_set_buf(sg, tvmem[0] + *keysize,
					   PAGE_SIZE - *keysize);
				k -= PAGE_SIZE;
				j = 1;
				while (k > PAGE_SIZE) {
					sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
					memset(tvmem[j], 0xff, PAGE_SIZE);
					j++;
					k -= PAGE_SIZE;
				}
				sg_set_buf(sg + j, tvmem[j], k);
				memset(tvmem[j], 0xff, k);
			} else {
				sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
			}

			iv_len = crypto_skcipher_ivsize(tfm);
			if (iv_len)
				memset(&iv, 0xff, iv_len);

			/* In-place operation: source and destination alias. */
			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);

			if (secs)
				ret = test_acipher_jiffies(req, enc,
							   *b_size, secs);
			else
				ret = test_acipher_cycles(req, enc,
							  *b_size);

			if (ret) {
				pr_err("%s() failed flags=%x\n", e,
				       crypto_skcipher_get_flags(tfm));
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out_free_req:
	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
}
/* Benchmark allowing asynchronous skcipher implementations. */
static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
			       struct cipher_speed_template *template,
			       unsigned int tcount, u8 *keysize)
{
	test_skcipher_speed(algo, enc, secs, template, tcount, keysize, true);
}
/* Benchmark with CRYPTO_ALG_ASYNC passed as the allocation mask. */
static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
			      struct cipher_speed_template *template,
			      unsigned int tcount, u8 *keysize)
{
	test_skcipher_speed(algo, enc, secs, template, tcount, keysize, false);
}
/* Probe each algorithm in check[] and report whether it is registered. */
static void test_available(void)
{
	char **p;

	for (p = check; *p; p++) {
		printk("alg %s ", *p);
		printk(crypto_has_alg(*p, 0, 0) ?
		       "found\n" : "not found\n");
	}
}
/*
 * Run the testmgr self-test for @alg (same name as both alg and driver).
 * In FIPS mode, non-approved algorithms report -EINVAL; treat that as a
 * pass so the full test sweep keeps going.
 */
static inline int tcrypt_test(const char *alg)
{
	int rc = alg_test(alg, alg, 0, 0);

	/* non-fips algs return -EINVAL in fips mode */
	return (fips_enabled && rc == -EINVAL) ? 0 : rc;
}
static int do_test(const char *alg, u32 type, u32 mask, int m)
{
int i;
int ret = 0;
switch (m) {
case 0:
if (alg) {
if (!crypto_has_alg(alg, type,
mask ?: CRYPTO_ALG_TYPE_MASK))
ret = -ENOENT;
break;
}
for (i = 1; i < 200; i++)
ret += do_test(NULL, 0, 0, i);
break;
case 1:
ret += tcrypt_test("md5");
break;
case 2:
ret += tcrypt_test("sha1");
break;
case 3:
ret += tcrypt_test("ecb(des)");
ret += tcrypt_test("cbc(des)");
ret += tcrypt_test("ctr(des)");
break;
case 4:
ret += tcrypt_test("ecb(des3_ede)");
ret += tcrypt_test("cbc(des3_ede)");
ret += tcrypt_test("ctr(des3_ede)");
break;
case 5:
ret += tcrypt_test("md4");
break;
case 6:
ret += tcrypt_test("sha256");
break;
case 7:
ret += tcrypt_test("ecb(blowfish)");
ret += tcrypt_test("cbc(blowfish)");
ret += tcrypt_test("ctr(blowfish)");
break;
case 8:
ret += tcrypt_test("ecb(twofish)");
ret += tcrypt_test("cbc(twofish)");
ret += tcrypt_test("ctr(twofish)");
ret += tcrypt_test("lrw(twofish)");
ret += tcrypt_test("xts(twofish)");
break;
case 9:
ret += tcrypt_test("ecb(serpent)");
ret += tcrypt_test("cbc(serpent)");
ret += tcrypt_test("ctr(serpent)");
ret += tcrypt_test("lrw(serpent)");
ret += tcrypt_test("xts(serpent)");
break;
case 10:
ret += tcrypt_test("ecb(aes)");
ret += tcrypt_test("cbc(aes)");
ret += tcrypt_test("lrw(aes)");
ret += tcrypt_test("xts(aes)");
ret += tcrypt_test("ctr(aes)");
ret += tcrypt_test("rfc3686(ctr(aes))");
break;
case 11:
ret += tcrypt_test("sha384");
break;
case 12:
ret += tcrypt_test("sha512");
break;
case 13:
ret += tcrypt_test("deflate");
break;
case 14:
ret += tcrypt_test("ecb(cast5)");
ret += tcrypt_test("cbc(cast5)");
ret += tcrypt_test("ctr(cast5)");
break;
case 15:
ret += tcrypt_test("ecb(cast6)");
ret += tcrypt_test("cbc(cast6)");
ret += tcrypt_test("ctr(cast6)");
ret += tcrypt_test("lrw(cast6)");
ret += tcrypt_test("xts(cast6)");
break;
case 16:
ret += tcrypt_test("ecb(arc4)");
break;
case 17:
ret += tcrypt_test("michael_mic");
break;
case 18:
ret += tcrypt_test("crc32c");
break;
case 19:
ret += tcrypt_test("ecb(tea)");
break;
case 20:
ret += tcrypt_test("ecb(xtea)");
break;
case 21:
ret += tcrypt_test("ecb(khazad)");
break;
case 22:
ret += tcrypt_test("wp512");
break;
case 23:
ret += tcrypt_test("wp384");
break;
case 24:
ret += tcrypt_test("wp256");
break;
case 25:
ret += tcrypt_test("ecb(tnepres)");
break;
case 26:
ret += tcrypt_test("ecb(anubis)");
ret += tcrypt_test("cbc(anubis)");
break;
case 27:
ret += tcrypt_test("tgr192");
break;
case 28:
ret += tcrypt_test("tgr160");
break;
case 29:
ret += tcrypt_test("tgr128");
break;
case 30:
ret += tcrypt_test("ecb(xeta)");
break;
case 31:
ret += tcrypt_test("pcbc(fcrypt)");
break;
case 32:
ret += tcrypt_test("ecb(camellia)");
ret += tcrypt_test("cbc(camellia)");
ret += tcrypt_test("ctr(camellia)");
ret += tcrypt_test("lrw(camellia)");
ret += tcrypt_test("xts(camellia)");
break;
case 33:
ret += tcrypt_test("sha224");
break;
case 34:
ret += tcrypt_test("salsa20");
break;
case 35:
ret += tcrypt_test("gcm(aes)");
break;
case 36:
ret += tcrypt_test("lzo");
break;
case 37:
ret += tcrypt_test("ccm(aes)");
break;
case 38:
ret += tcrypt_test("cts(cbc(aes))");
break;
case 39:
ret += tcrypt_test("rmd128");
break;
case 40:
ret += tcrypt_test("rmd160");
break;
case 41:
ret += tcrypt_test("rmd256");
break;
case 42:
ret += tcrypt_test("rmd320");
break;
case 43:
ret += tcrypt_test("ecb(seed)");
break;
case 44:
ret += tcrypt_test("zlib");
break;
case 45:
ret += tcrypt_test("rfc4309(ccm(aes))");
break;
case 46:
ret += tcrypt_test("ghash");
break;
case 47:
ret += tcrypt_test("crct10dif");
break;
case 48:
ret += tcrypt_test("sha3-224");
break;
case 49:
ret += tcrypt_test("sha3-256");
break;
case 50:
ret += tcrypt_test("sha3-384");
break;
case 51:
ret += tcrypt_test("sha3-512");
break;
case 100:
ret += tcrypt_test("hmac(md5)");
break;
case 101:
ret += tcrypt_test("hmac(sha1)");
break;
case 102:
ret += tcrypt_test("hmac(sha256)");
break;
case 103:
ret += tcrypt_test("hmac(sha384)");
break;
case 104:
ret += tcrypt_test("hmac(sha512)");
break;
case 105:
ret += tcrypt_test("hmac(sha224)");
break;
case 106:
ret += tcrypt_test("xcbc(aes)");
break;
case 107:
ret += tcrypt_test("hmac(rmd128)");
break;
case 108:
ret += tcrypt_test("hmac(rmd160)");
break;
case 109:
ret += tcrypt_test("vmac(aes)");
break;
case 110:
ret += tcrypt_test("hmac(crc32)");
break;
case 111:
ret += tcrypt_test("hmac(sha3-224)");
break;
case 112:
ret += tcrypt_test("hmac(sha3-256)");
break;
case 113:
ret += tcrypt_test("hmac(sha3-384)");
break;
case 114:
ret += tcrypt_test("hmac(sha3-512)");
break;
case 150:
ret += tcrypt_test("ansi_cprng");
break;
case 151:
ret += tcrypt_test("rfc4106(gcm(aes))");
break;
case 152:
ret += tcrypt_test("rfc4543(gcm(aes))");
break;
case 153:
ret += tcrypt_test("cmac(aes)");
break;
case 154:
ret += tcrypt_test("cmac(des3_ede)");
break;
case 155:
ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
break;
case 156:
ret += tcrypt_test("authenc(hmac(md5),ecb(cipher_null))");
break;
case 157:
ret += tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))");
break;
case 181:
ret += tcrypt_test("authenc(hmac(sha1),cbc(des))");
break;
case 182:
ret += tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))");
break;
case 183:
ret += tcrypt_test("authenc(hmac(sha224),cbc(des))");
break;
case 184:
ret += tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))");
break;
case 185:
ret += tcrypt_test("authenc(hmac(sha256),cbc(des))");
break;
case 186:
ret += tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))");
break;
case 187:
ret += tcrypt_test("authenc(hmac(sha384),cbc(des))");
break;
case 188:
ret += tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))");
break;
case 189:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des))");
break;
case 190:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 201:
test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ctr(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ctr(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
break;
case 202:
test_cipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 203:
test_cipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break;
case 204:
test_cipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
break;
case 205:
test_cipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 206:
test_cipher_speed("salsa20", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
break;
case 207:
test_cipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 208:
test_cipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
speed_template_8);
break;
case 209:
test_cipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
break;
case 210:
test_cipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 211:
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:
test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
break;
case 213:
test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
break;
case 214:
test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0,
speed_template_32);
break;
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
break;
}
/* fall through */
case 301:
test_hash_speed("md4", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 302:
test_hash_speed("md5", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 303:
test_hash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 304:
test_hash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 305:
test_hash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 306:
test_hash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 307:
test_hash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 308:
test_hash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 309:
test_hash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 310:
test_hash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 311:
test_hash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 312:
test_hash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 313:
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 314:
test_hash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 316:
test_hash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 317:
test_hash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 318:
test_hash_speed("ghash-generic", sec, hash_speed_template_16);
if (mode > 300 && mode < 400) break;
case 319:
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 320:
test_hash_speed("crct10dif", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 323:
test_hash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 324:
test_hash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 325:
test_hash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
case 399:
break;
case 400:
if (alg) {
test_ahash_speed(alg, sec, generic_hash_speed_template);
break;
}
/* fall through */
case 401:
test_ahash_speed("md4", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 402:
test_ahash_speed("md5", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 403:
test_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 404:
test_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 405:
test_ahash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 406:
test_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 407:
test_ahash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 408:
test_ahash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 409:
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 410:
test_ahash_speed("tgr128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 411:
test_ahash_speed("tgr160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 412:
test_ahash_speed("tgr192", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 414:
test_ahash_speed("rmd128", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 416:
test_ahash_speed("rmd256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 417:
test_ahash_speed("rmd320", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 418:
test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 419:
test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 420:
test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 421:
test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 422:
test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 423:
test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 424:
test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
case 499:
break;
case 500:
test_acipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0,
speed_template_20_28_36);
test_acipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 0,
speed_template_20_28_36);
break;
case 501:
test_acipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cfb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cfb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ofb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ofb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
break;
case 502:
test_acipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
break;
case 503:
test_acipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 504:
test_acipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_acipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 505:
test_acipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
speed_template_8);
break;
case 506:
test_acipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
break;
case 507:
test_acipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 508:
test_acipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 509:
test_acipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break;
case 1000:
test_available();
break;
}
return ret;
}
static int __init tcrypt_mod_init(void)
{
	int rc = -ENOMEM;
	int n;

	/* Grab one page per test-vector slot; bail out on the first failure. */
	for (n = 0; n < TVMEMSIZE; n++) {
		tvmem[n] = (void *)__get_free_page(GFP_KERNEL);
		if (!tvmem[n])
			goto out;
	}

	rc = do_test(alg, type, mask, mode);
	if (rc) {
		printk(KERN_ERR "tcrypt: one or more tests failed!\n");
		goto out;
	}

	/*
	 * All work happens at init time and the module offers no runtime
	 * functionality, so returning -EAGAIN deliberately keeps it from
	 * staying loaded.  In FIPS mode the successful return is preserved,
	 * because a clean load then doubles as a "self-tests passed" signal.
	 * -- mludvig
	 */
	if (!fips_enabled)
		rc = -EAGAIN;

out:
	for (n = 0; n < TVMEMSIZE && tvmem[n]; n++)
		free_page((unsigned long)tvmem[n]);
	return rc;
}
/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit tcrypt_mod_fini(void) { }	/* nothing to tear down */

module_init(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);

/* Test-selection knobs, settable on the insmod command line. */
module_param(alg, charp, 0);	/* algorithm name; forwarded to do_test() */
module_param(type, uint, 0);	/* forwarded to do_test() */
module_param(mask, uint, 0);	/* forwarded to do_test() */
module_param(mode, int, 0);	/* selects which do_test() case(s) run */
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
	      "(defaults to zero which uses CPU cycles instead)");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
| gpl-2.0 |
sdwuyawen/u-boot | arch/powerpc/cpu/ppc4xx/cpu_init.c | 152 | 15604 | /*
* (C) Copyright 2000-2007
* Wolfgang Denk, DENX Software Engineering, wd@denx.de.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <watchdog.h>
#include <asm/ppc4xx-emac.h>
#include <asm/processor.h>
#include <asm/ppc4xx-gpio.h>
#include <asm/ppc4xx.h>
#if defined(CONFIG_405GP) || defined(CONFIG_405EP)
DECLARE_GLOBAL_DATA_PTR;
#endif
#ifndef CONFIG_SYS_PLL_RECONFIG
#define CONFIG_SYS_PLL_RECONFIG 0
#endif
#if defined(CONFIG_440EPX) || \
defined(CONFIG_460EX) || defined(CONFIG_460GT)
/*
 * Make a just-programmed clock configuration take effect by resetting
 * the chip.  CPR0_ICFG[RLI] (reload inhibit) is set first so the CPR
 * values survive the reset instead of being re-read from the boot
 * strapping source; the final mtspr then triggers the reset itself.
 */
static void reset_with_rli(void)
{
	u32 reg;

	/*
	 * Set reload inhibit so configuration will persist across
	 * processor resets
	 */
	mfcpr(CPR0_ICFG, reg);
	reg |= CPR0_ICFG_RLI_MASK;
	mtcpr(CPR0_ICFG, reg);

	/* Reset processor if configuration changed */
	__asm__ __volatile__ ("sync; isync");
	mtspr(SPRN_DBCR0, 0x20000000);	/* request reset via DBCR0[RST] */
}
#endif
/*
 * reconfigure_pll() - bring the CPU clocking in line with the requested
 * frequency (only a 667 MHz target is handled, on 440EPx) and fix up
 * boot-strap cases that require a forward-divider swap (440EPx) or a
 * PLL engage (460EX/GT).  Whenever a CPR register was changed, the chip
 * is reset via reset_with_rli() so the new values take effect.  No-op
 * for parts without the relevant CONFIG_* options.
 */
void reconfigure_pll(u32 new_cpu_freq)
{
#if defined(CONFIG_440EPX)
	int reset_needed = 0;
	u32 reg, temp;
	u32 prbdv0, target_prbdv0,			/* CLK_PRIMBD */
	    fwdva, target_fwdva, fwdvb, target_fwdvb,	/* CLK_PLLD */
	    fbdv, target_fbdv, lfbdv, target_lfbdv,
	    perdv0, target_perdv0,			/* CLK_PERD */
	    spcid0, target_spcid0;			/* CLK_SPCID */

	/* Reconfigure clocks if necessary.
	 * See PPC440EPx User's Manual, sections 8.2 and 14 */
	if (new_cpu_freq == 667) {
		/* Divider targets for a 667 MHz CPU clock */
		target_prbdv0 = 2;
		target_fwdva = 2;
		target_fwdvb = 4;
		target_fbdv = 20;
		target_lfbdv = 1;
		target_perdv0 = 4;
		target_spcid0 = 4;

		/* PRIMBD divider: a field value of 0 encodes divider 8 */
		mfcpr(CPR0_PRIMBD0, reg);
		temp = (reg & PRBDV_MASK) >> 24;
		prbdv0 = temp ? temp : 8;
		if (prbdv0 != target_prbdv0) {
			reg &= ~PRBDV_MASK;
			reg |= ((target_prbdv0 == 8 ? 0 : target_prbdv0) << 24);
			mtcpr(CPR0_PRIMBD0, reg);
			reset_needed = 1;
		}

		/* PLL dividers; in each field 0 encodes the maximum value */
		mfcpr(CPR0_PLLD, reg);

		temp = (reg & PLLD_FWDVA_MASK) >> 16;
		fwdva = temp ? temp : 16;

		temp = (reg & PLLD_FWDVB_MASK) >> 8;
		fwdvb = temp ? temp : 8;

		temp = (reg & PLLD_FBDV_MASK) >> 24;
		fbdv = temp ? temp : 32;

		temp = (reg & PLLD_LFBDV_MASK);
		lfbdv = temp ? temp : 64;

		/*
		 * NOTE(review): fwdvb is decoded above but not part of this
		 * change-detection comparison, although the write below does
		 * program target_fwdvb - confirm this is intentional.
		 */
		if (fwdva != target_fwdva || fbdv != target_fbdv || lfbdv != target_lfbdv) {
			reg &= ~(PLLD_FWDVA_MASK | PLLD_FWDVB_MASK |
				 PLLD_FBDV_MASK | PLLD_LFBDV_MASK);
			reg |= ((target_fwdva == 16 ? 0 : target_fwdva) << 16) |
				((target_fwdvb == 8 ? 0 : target_fwdvb) << 8) |
				((target_fbdv == 32 ? 0 : target_fbdv) << 24) |
				(target_lfbdv == 64 ? 0 : target_lfbdv);
			mtcpr(CPR0_PLLD, reg);
			reset_needed = 1;
		}

		/* Peripheral clock divider */
		mfcpr(CPR0_PERD, reg);
		perdv0 = (reg & CPR0_PERD_PERDV0_MASK) >> 24;
		if (perdv0 != target_perdv0) {
			reg &= ~CPR0_PERD_PERDV0_MASK;
			reg |= (target_perdv0 << 24);
			mtcpr(CPR0_PERD, reg);
			reset_needed = 1;
		}

		/* Sync PCI clock divider (field value 0 encodes divider 4) */
		mfcpr(CPR0_SPCID, reg);
		temp = (reg & CPR0_SPCID_SPCIDV0_MASK) >> 24;
		spcid0 = temp ? temp : 4;
		if (spcid0 != target_spcid0) {
			reg &= ~CPR0_SPCID_SPCIDV0_MASK;
			reg |= ((target_spcid0 == 4 ? 0 : target_spcid0) << 24);
			mtcpr(CPR0_SPCID, reg);
			reset_needed = 1;
		}
	}

	/* Get current value of FWDVA.*/
	mfcpr(CPR0_PLLD, reg);
	temp = (reg & PLLD_FWDVA_MASK) >> 16;

	/*
	 * Check to see if FWDVA has been set to value of 1. if it has we must
	 * modify it.
	 */
	if (temp == 1) {
		/*
		 * Load register that contains current boot strapping option.
		 */
		mfcpr(CPR0_ICFG, reg);
		/*
		 * Strapping option bits (ICS) are already in correct position,
		 * only masking needed.
		 */
		reg &= CPR0_ICFG_ICS_MASK;

		if ((reg == BOOT_STRAP_OPTION_A) || (reg == BOOT_STRAP_OPTION_B) ||
		    (reg == BOOT_STRAP_OPTION_D) || (reg == BOOT_STRAP_OPTION_E)) {
			mfcpr(CPR0_PLLD, reg);

			/* Get current value of fbdv. */
			temp = (reg & PLLD_FBDV_MASK) >> 24;
			fbdv = temp ? temp : 32;

			/* Get current value of lfbdv. */
			temp = (reg & PLLD_LFBDV_MASK);
			lfbdv = temp ? temp : 64;

			/*
			 * Get current value of FWDVA. Assign current FWDVA to
			 * new FWDVB.
			 */
			mfcpr(CPR0_PLLD, reg);
			target_fwdvb = (reg & PLLD_FWDVA_MASK) >> 16;
			fwdvb = target_fwdvb ? target_fwdvb : 8;

			/*
			 * Get current value of FWDVB. Assign current FWDVB to
			 * new FWDVA.
			 */
			target_fwdva = (reg & PLLD_FWDVB_MASK) >> 8;
			fwdva = target_fwdva ? target_fwdva : 16;

			/*
			 * Update CPR0_PLLD with switched FWDVA and FWDVB.
			 */
			reg &= ~(PLLD_FWDVA_MASK | PLLD_FWDVB_MASK |
				 PLLD_FBDV_MASK | PLLD_LFBDV_MASK);
			reg |= ((fwdva == 16 ? 0 : fwdva) << 16) |
				((fwdvb == 8 ? 0 : fwdvb) << 8) |
				((fbdv == 32 ? 0 : fbdv) << 24) |
				(lfbdv == 64 ? 0 : lfbdv);
			mtcpr(CPR0_PLLD, reg);

			/* Acknowledge that a reset is required. */
			reset_needed = 1;
		}
	}

	/* Now reset the CPU if needed */
	if (reset_needed)
		reset_with_rli();
#endif

#if defined(CONFIG_460EX) || defined(CONFIG_460GT)
	u32 reg;

	/*
	 * See "9.2.1.1 Booting with Option E" in the 460EX/GT
	 * users manual
	 */
	mfcpr(CPR0_PLLC, reg);
	if ((reg & (CPR0_PLLC_RST | CPR0_PLLC_ENG)) == CPR0_PLLC_RST) {
		/*
		 * Set engage bit
		 */
		reg = (reg & ~CPR0_PLLC_RST) | CPR0_PLLC_ENG;
		mtcpr(CPR0_PLLC, reg);

		/* Now reset the CPU */
		reset_with_rli();
	}
#endif
}
#ifdef CONFIG_SYS_4xx_CHIP_21_ERRATA
void
chip_21_errata(void)
{
	/*
	 * Workaround for CHIP_21 from the 405EX/405EXr errata (rev 1.09):
	 * reads of the PVR and/or SDR0_ECID occasionally return incorrect
	 * values.  The rev-D chip uses SDR0_ECID bits to control internal
	 * features, so a corrupted read could leave the second PCIe or
	 * ethernet of an EX variant non-functional, or wrongly disable
	 * security features of EX and EXr parts.
	 *
	 * Suggested detection (covers rev C and rev D):
	 *  1. Read the PVR and SDR0_ECID3.
	 *  2. Rev C is identified by a matching rev-C PVR value with
	 *     SDR0_ECID3[12:15] differing from PVR[28:31]: no reset needed.
	 *  3. Rev D is identified by a matching rev-D PVR value with
	 *     SDR0_ECID3[10:11] at its expected value: no reset needed.
	 *  Anything else: assume a bad read and force a SysReset by
	 *  writing DBCR0[RST] = 0b11.
	 */
	u32 pvr = get_pvr();
	u32 ecid3;

	mfsdr(SDR0_ECID3, ecid3);

	/* Consistent Revision C read? */
	if (pvr == CONFIG_405EX_CHIP21_PVR_REV_C &&
	    (pvr & 0xf) != ((ecid3 >> 16) & 0xf))
		return;

	/* Consistent Revision D read? */
	if (pvr == CONFIG_405EX_CHIP21_PVR_REV_D &&
	    ((ecid3 >> 20) & 0x3) == CONFIG_405EX_CHIP21_ECID3_REV_D)
		return;

	/* Values look corrupted: reset and try again. */
	__asm__ __volatile__ ("sync; isync");
	mtspr(SPRN_DBCR0, 0x30000000);
}
#endif
/*
* Breath some life into the CPU...
*
* Reconfigure PLL if necessary,
* set up the memory map,
* initialize a bunch of registers
*/
void
cpu_init_f (void)
{
#if defined(CONFIG_WATCHDOG) || defined(CONFIG_440GX) || defined(CONFIG_460EX)
	u32 val;
#endif

#ifdef CONFIG_SYS_4xx_CHIP_21_ERRATA
	chip_21_errata();
#endif

	/* Reprogram the PLL to the board's target (0 = leave unchanged) */
	reconfigure_pll(CONFIG_SYS_PLL_RECONFIG);

#if (defined(CONFIG_405EP) || defined (CONFIG_405EX)) && \
    !defined(CONFIG_APM821XX) &&!defined(CONFIG_SYS_4xx_GPIO_TABLE)
	/*
	 * GPIO0 setup (select GPIO or alternate function)
	 */
#if defined(CONFIG_SYS_GPIO0_OR)
	out32(GPIO0_OR, CONFIG_SYS_GPIO0_OR);	/* set initial state of output pins */
#endif
#if defined(CONFIG_SYS_GPIO0_ODR)
	out32(GPIO0_ODR, CONFIG_SYS_GPIO0_ODR);	/* open-drain select */
#endif
	out32(GPIO0_OSRH, CONFIG_SYS_GPIO0_OSRH); /* output select */
	out32(GPIO0_OSRL, CONFIG_SYS_GPIO0_OSRL);
	out32(GPIO0_ISR1H, CONFIG_SYS_GPIO0_ISR1H); /* input select */
	out32(GPIO0_ISR1L, CONFIG_SYS_GPIO0_ISR1L);
	out32(GPIO0_TSRH, CONFIG_SYS_GPIO0_TSRH); /* three-state select */
	out32(GPIO0_TSRL, CONFIG_SYS_GPIO0_TSRL);
#if defined(CONFIG_SYS_GPIO0_ISR2H)
	out32(GPIO0_ISR2H, CONFIG_SYS_GPIO0_ISR2H);
	out32(GPIO0_ISR2L, CONFIG_SYS_GPIO0_ISR2L);
#endif
#if defined (CONFIG_SYS_GPIO0_TCR)
	out32(GPIO0_TCR, CONFIG_SYS_GPIO0_TCR);	/* enable output driver for outputs */
#endif
#endif /* CONFIG_405EP ... && !CONFIG_SYS_4xx_GPIO_TABLE */

#if defined (CONFIG_405EP)
	/*
	 * Set EMAC noise filter bits
	 */
	mtdcr(CPC0_EPCTL, CPC0_EPCTL_E0NFE | CPC0_EPCTL_E1NFE);
#endif /* CONFIG_405EP */

#if defined(CONFIG_SYS_4xx_GPIO_TABLE)
	gpio_set_chip_configuration();
#endif /* CONFIG_SYS_4xx_GPIO_TABLE */

	/*
	 * External Bus Controller (EBC) Setup
	 */
#if (defined(CONFIG_SYS_EBC_PB0AP) && defined(CONFIG_SYS_EBC_PB0CR))
#if (defined(CONFIG_405GP) || defined(CONFIG_405CR) || \
     defined(CONFIG_405EP) || defined(CONFIG_405EZ) || \
     defined(CONFIG_405EX) || defined(CONFIG_405))
	/*
	 * Move the next instructions into icache, since these modify the flash
	 * we are running from!
	 * (icbt 14 cache lines, then spin in a counted branch while the
	 * bank registers are rewritten below)
	 */
	asm volatile(" bl 0f" ::: "lr");
	asm volatile("0: mflr 3" ::: "r3");
	asm volatile(" addi 4, 0, 14" ::: "r4");
	asm volatile(" mtctr 4" ::: "ctr");
	asm volatile("1: icbt 0, 3");
	asm volatile(" addi 3, 3, 32" ::: "r3");
	asm volatile(" bdnz 1b" ::: "ctr", "cr0");
	asm volatile(" addis 3, 0, 0x0" ::: "r3");
	asm volatile(" ori 3, 3, 0xA000" ::: "r3");
	asm volatile(" mtctr 3" ::: "ctr");
	asm volatile("2: bdnz 2b" ::: "ctr", "cr0");
#endif

	mtebc(PB0AP, CONFIG_SYS_EBC_PB0AP);
	mtebc(PB0CR, CONFIG_SYS_EBC_PB0CR);
#endif

	/*
	 * Program the remaining EBC banks, except any bank that is used
	 * as initial-RAM data cache (CONFIG_SYS_INIT_DCACHE_CS).
	 */
#if (defined(CONFIG_SYS_EBC_PB1AP) && defined(CONFIG_SYS_EBC_PB1CR) && !(CONFIG_SYS_INIT_DCACHE_CS == 1))
	mtebc(PB1AP, CONFIG_SYS_EBC_PB1AP);
	mtebc(PB1CR, CONFIG_SYS_EBC_PB1CR);
#endif

#if (defined(CONFIG_SYS_EBC_PB2AP) && defined(CONFIG_SYS_EBC_PB2CR) && !(CONFIG_SYS_INIT_DCACHE_CS == 2))
	mtebc(PB2AP, CONFIG_SYS_EBC_PB2AP);
	mtebc(PB2CR, CONFIG_SYS_EBC_PB2CR);
#endif

#if (defined(CONFIG_SYS_EBC_PB3AP) && defined(CONFIG_SYS_EBC_PB3CR) && !(CONFIG_SYS_INIT_DCACHE_CS == 3))
	mtebc(PB3AP, CONFIG_SYS_EBC_PB3AP);
	mtebc(PB3CR, CONFIG_SYS_EBC_PB3CR);
#endif

#if (defined(CONFIG_SYS_EBC_PB4AP) && defined(CONFIG_SYS_EBC_PB4CR) && !(CONFIG_SYS_INIT_DCACHE_CS == 4))
	mtebc(PB4AP, CONFIG_SYS_EBC_PB4AP);
	mtebc(PB4CR, CONFIG_SYS_EBC_PB4CR);
#endif

#if (defined(CONFIG_SYS_EBC_PB5AP) && defined(CONFIG_SYS_EBC_PB5CR) && !(CONFIG_SYS_INIT_DCACHE_CS == 5))
	mtebc(PB5AP, CONFIG_SYS_EBC_PB5AP);
	mtebc(PB5CR, CONFIG_SYS_EBC_PB5CR);
#endif

#if (defined(CONFIG_SYS_EBC_PB6AP) && defined(CONFIG_SYS_EBC_PB6CR) && !(CONFIG_SYS_INIT_DCACHE_CS == 6))
	mtebc(PB6AP, CONFIG_SYS_EBC_PB6AP);
	mtebc(PB6CR, CONFIG_SYS_EBC_PB6CR);
#endif

#if (defined(CONFIG_SYS_EBC_PB7AP) && defined(CONFIG_SYS_EBC_PB7CR) && !(CONFIG_SYS_INIT_DCACHE_CS == 7))
	mtebc(PB7AP, CONFIG_SYS_EBC_PB7AP);
	mtebc(PB7CR, CONFIG_SYS_EBC_PB7CR);
#endif

#if defined (CONFIG_SYS_EBC_CFG)
	mtebc(EBC0_CFG, CONFIG_SYS_EBC_CFG);
#endif

#if defined(CONFIG_WATCHDOG)
	val = mfspr(SPRN_TCR);
#if defined(CONFIG_440EP) || defined(CONFIG_440GR)
	val |= 0xb8000000;	/* generate system reset after 1.34 seconds */
#elif defined(CONFIG_440EPX)
	val |= 0xb0000000;	/* generate system reset after 1.34 seconds */
#else
	val |= 0xf0000000;	/* generate system reset after 2.684 seconds */
#endif
#if defined(CONFIG_SYS_4xx_RESET_TYPE)
	val &= ~0x30000000;			/* clear WRC bits */
	val |= CONFIG_SYS_4xx_RESET_TYPE << 28;	/* set board specific WRC type */
#endif
	mtspr(SPRN_TCR, val);

	val = mfspr(SPRN_TSR);
	val |= 0x80000000;	/* enable watchdog timer */
	mtspr(SPRN_TSR, val);

	reset_4xx_watchdog();
#endif /* CONFIG_WATCHDOG */

#if defined(CONFIG_440GX)
	/* Take the GX out of compatibility mode
	 * Travis Sawyer, 9 Mar 2004
	 * NOTE: 440gx user manual inconsistency here
	 *       Compatibility mode and Ethernet Clock select are not
	 *       correct in the manual
	 */
	mfsdr(SDR0_MFR, val);
	val &= ~0x10000000;
	mtsdr(SDR0_MFR,val);
#endif /* CONFIG_440GX */

#if defined(CONFIG_460EX)
	/*
	 * Set SDR0_AHB_CFG[A2P_INCR4] (bit 24) and
	 * clear SDR0_AHB_CFG[A2P_PROT2] (bit 25) for a new 460EX errata
	 * regarding concurrent use of AHB USB OTG, USB 2.0 host and SATA
	 */
	mfsdr(SDR0_AHB_CFG, val);
	val |= 0x80;
	val &= ~0x40;
	mtsdr(SDR0_AHB_CFG, val);
	mfsdr(SDR0_USB2HOST_CFG, val);
	val &= ~0xf00;
	val |= 0x400;
	mtsdr(SDR0_USB2HOST_CFG, val);
#endif /* CONFIG_460EX */

#if defined(CONFIG_405EX) || \
    defined(CONFIG_440SP) || defined(CONFIG_440SPE) || \
    defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
    defined(CONFIG_460SX) || defined(CONFIG_APM821XX)
	/*
	 * Set PLB4 arbiter (Segment 0 and 1) to 4 deep pipeline read
	 */
	mtdcr(PLB4A0_ACR, (mfdcr(PLB4A0_ACR) & ~PLB4Ax_ACR_RDP_MASK) |
	      PLB4Ax_ACR_RDP_4DEEP);
	mtdcr(PLB4A1_ACR, (mfdcr(PLB4A1_ACR) & ~PLB4Ax_ACR_RDP_MASK) |
	      PLB4Ax_ACR_RDP_4DEEP);
#endif /* CONFIG_440SP/SPE || CONFIG_460EX/GT || CONFIG_405EX */
}
/*
* initialize higher level parts of CPU like time base and timers
*/
int cpu_init_r (void)
{
#if defined(CONFIG_405GP)
	/*
	 * Enable edge conditioning circuitry on PPC405GPr parts for
	 * compatibility with existing PPC405GP designs.
	 */
	if ((get_pvr() & 0xfffffff0) == (PVR_405GPR_RB & 0xfffffff0))
		mtdcr(CPC0_ECR, 0x60606000);
#endif /* defined(CONFIG_405GP) */

	return 0;
}
#if defined(CONFIG_PCI) && \
(defined(CONFIG_440EP) || defined(CONFIG_440EPX) || \
defined(CONFIG_440GR) || defined(CONFIG_440GRX))
/*
* 440EP(x)/GR(x) PCI async/sync clocking restriction:
*
* In asynchronous PCI mode, the synchronous PCI clock must meet
* certain requirements. The following equation describes the
* relationship that must be maintained between the asynchronous PCI
* clock and synchronous PCI clock. Select an appropriate PCI:PLB
* ratio to maintain the relationship:
*
* AsyncPCIClk - 1MHz <= SyncPCIclock <= (2 * AsyncPCIClk) - 1MHz
*/
/*
 * Return 1 when the synchronous PCI clock satisfies
 *   AsyncPCIClk - 1MHz <= SyncPCIclock <= (2 * AsyncPCIClk) - 1MHz
 * and 0 otherwise.
 */
static int ppc4xx_pci_sync_clock_ok(u32 sync, u32 async)
{
	return ((async - 1000000) <= sync) && (sync <= ((2 * async) - 1000000));
}
/*
 * Verify (and if necessary reconfigure) the synchronous PCI clock so it
 * meets the async/sync relationship described above.  On success returns
 * 0; returns -1 when no PLB divider (1..4) yields a legal sync clock.
 * When the divider is changed, the chip is reset (with reload inhibit
 * set) so the new CPR0_SPCID value takes effect.
 */
int ppc4xx_pci_sync_clock_config(u32 async)
{
	sys_info_t sys_info;
	u32 sync;
	int div;
	u32 reg;
	u32 spcid_val[] = {
		CPR0_SPCID_SPCIDV0_DIV1, CPR0_SPCID_SPCIDV0_DIV2,
		CPR0_SPCID_SPCIDV0_DIV3, CPR0_SPCID_SPCIDV0_DIV4 };

	get_sys_info(&sys_info);
	sync = sys_info.freqPCI;

	/*
	 * First check if the equation above is met
	 */
	if (!ppc4xx_pci_sync_clock_ok(sync, async)) {
		/*
		 * Reconfigure PCI sync clock to meet the equation.
		 * Start with highest possible PCI sync frequency
		 * (divider 1).
		 */
		for (div = 1; div <= 4; div++) {
			sync = sys_info.freqPLB / div;
			if (ppc4xx_pci_sync_clock_ok(sync, async))
				break;
		}

		if (div <= 4) {
			/*
			 * spcid_val[] is zero-based: entry (div - 1) encodes
			 * divider "div".  Indexing with div itself would pick
			 * the wrong divider and read past the array for
			 * div == 4.
			 */
			mtcpr(CPR0_SPCID, spcid_val[div - 1]);

			/* Set reload inhibit so the value survives the reset */
			mfcpr(CPR0_ICFG, reg);
			reg |= CPR0_ICFG_RLI_MASK;
			mtcpr(CPR0_ICFG, reg);

			/* do chip reset */
			mtspr(SPRN_DBCR0, 0x20000000);
		} else {
			/* Impossible to configure the PCI sync clock */
			return -1;
		}
	}

	return 0;
}
#endif
| gpl-2.0 |
tpmullan/android_kernel_asus_tf700 | arch/arm/mach-omap2/board-ldp.c | 152 | 10933 | /*
* linux/arch/arm/mach-omap2/board-ldp.c
*
* Copyright (C) 2008 Texas Instruments Inc.
* Nishant Kamat <nskamat@ti.com>
*
* Modified from mach-omap2/board-3430sdp.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/gpio_keys.h>
#include <linux/workqueue.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
#include <linux/mmc/host.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/mcspi.h>
#include <mach/gpio.h>
#include <plat/board.h>
#include <plat/common.h>
#include <plat/gpmc.h>
#include <mach/board-zoom.h>
#include <asm/delay.h>
#include <plat/usb.h>
#include "board-flash.h"
#include "mux.h"
#include "hsmmc.h"
#include "control.h"
/* SMSC911x Ethernet: GPMC chip-select and the GPIO used as its IRQ line */
#define LDP_SMSC911X_CS 1
#define LDP_SMSC911X_GPIO 152
#define DEBUG_BASE 0x08000000
#define LDP_ETHR_START DEBUG_BASE

/*
 * smsc911x resources; the MEM window and the GPIO-based IRQ are filled
 * in at runtime by ldp_init_smsc911x().
 */
static struct resource ldp_smsc911x_resources[] = {
	[0] = {
		.start = LDP_ETHR_START,
		.end = LDP_ETHR_START + SZ_4K,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = 0, /* set from OMAP_GPIO_IRQ() at init time */
		.end = 0,
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
	},
};

/* Active-low, open-drain IRQ; 32-bit bus attach; MII PHY */
static struct smsc911x_platform_config ldp_smsc911x_config = {
	.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
	.flags = SMSC911X_USE_32BIT,
	.phy_interface = PHY_INTERFACE_MODE_MII,
};

static struct platform_device ldp_smsc911x_device = {
	.name = "smsc911x",
	.id = -1,
	.num_resources = ARRAY_SIZE(ldp_smsc911x_resources),
	.resource = ldp_smsc911x_resources,
	.dev = {
		.platform_data = &ldp_smsc911x_config,
	},
};
/*
 * 6x6 keypad matrix map for the TWL4030 keypad controller.
 * NOTE(review): the KEY() argument order follows the matrix_keypad
 * macro -- confirm row/column assignment against the board schematic.
 */
static uint32_t board_keymap[] = {
	KEY(0, 0, KEY_1),
	KEY(1, 0, KEY_2),
	KEY(2, 0, KEY_3),
	KEY(0, 1, KEY_4),
	KEY(1, 1, KEY_5),
	KEY(2, 1, KEY_6),
	KEY(3, 1, KEY_F5),
	KEY(0, 2, KEY_7),
	KEY(1, 2, KEY_8),
	KEY(2, 2, KEY_9),
	KEY(3, 2, KEY_F6),
	KEY(0, 3, KEY_F7),
	KEY(1, 3, KEY_0),
	KEY(2, 3, KEY_F8),
	PERSISTENT_KEY(4, 5),
	KEY(4, 4, KEY_VOLUMEUP),
	KEY(5, 5, KEY_VOLUMEDOWN),
	0
};

static struct matrix_keymap_data board_map_data = {
	.keymap = board_keymap,
	.keymap_size = ARRAY_SIZE(board_keymap),
};

/* TWL4030 keypad: 6 rows x 6 columns, key repeat enabled */
static struct twl4030_keypad_data ldp_kp_twl4030_data = {
	.keymap_data = &board_map_data,
	.rows = 6,
	.cols = 6,
	.rep = 1,
};
/*
 * Discrete push buttons on GPIOs 101-109: all active-low with a 30 ms
 * software debounce.
 */
static struct gpio_keys_button ldp_gpio_keys_buttons[] = {
	[0] = {
		.code = KEY_ENTER,
		.gpio = 101,
		.desc = "enter sw",
		.active_low = 1,
		.debounce_interval = 30,
	},
	[1] = {
		.code = KEY_F1,
		.gpio = 102,
		.desc = "func 1",
		.active_low = 1,
		.debounce_interval = 30,
	},
	[2] = {
		.code = KEY_F2,
		.gpio = 103,
		.desc = "func 2",
		.active_low = 1,
		.debounce_interval = 30,
	},
	[3] = {
		.code = KEY_F3,
		.gpio = 104,
		.desc = "func 3",
		.active_low = 1,
		.debounce_interval = 30,
	},
	[4] = {
		.code = KEY_F4,
		.gpio = 105,
		.desc = "func 4",
		.active_low = 1,
		.debounce_interval = 30,
	},
	[5] = {
		.code = KEY_LEFT,
		.gpio = 106,
		.desc = "left sw",
		.active_low = 1,
		.debounce_interval = 30,
	},
	[6] = {
		.code = KEY_RIGHT,
		.gpio = 107,
		.desc = "right sw",
		.active_low = 1,
		.debounce_interval = 30,
	},
	[7] = {
		.code = KEY_UP,
		.gpio = 108,
		.desc = "up sw",
		.active_low = 1,
		.debounce_interval = 30,
	},
	[8] = {
		.code = KEY_DOWN,
		.gpio = 109,
		.desc = "down sw",
		.active_low = 1,
		.debounce_interval = 30,
	},
};

static struct gpio_keys_platform_data ldp_gpio_keys = {
	.buttons = ldp_gpio_keys_buttons,
	.nbuttons = ARRAY_SIZE(ldp_gpio_keys_buttons),
	.rep = 1,
};

static struct platform_device ldp_gpio_keys_device = {
	.name = "gpio-keys",
	.id = -1,
	.dev = {
		.platform_data = &ldp_gpio_keys,
	},
};
static int ts_gpio;
/**
 * ads7846_dev_init - request and configure the pen-down IRQ GPIO
 *
 * Claims ts_gpio, switches it to input and applies a 310 us debounce.
 * On request failure an error is logged and the GPIO is left
 * unconfigured.
 */
static void ads7846_dev_init(void)
{
	int err = gpio_request(ts_gpio, "ads7846 irq");

	if (err < 0) {
		printk(KERN_ERR "can't get ads746 pen down GPIO\n");
		return;
	}

	gpio_direction_input(ts_gpio);
	gpio_set_debounce(ts_gpio, 310);
}
/* Pen is down while the (active-low) ads7846 IRQ line reads 0. */
static int ads7846_get_pendown_state(void)
{
	return gpio_get_value(ts_gpio) ? 0 : 1;
}
/* ads7846 touchscreen controller config; pen-down state read via ts_gpio */
static struct ads7846_platform_data tsc2046_config __initdata = {
	.get_pendown_state = ads7846_get_pendown_state,
	.keep_vref_on = 1,
};

/* McSPI controller settings for the touchscreen chip-select */
static struct omap2_mcspi_device_config tsc2046_mcspi_config = {
	.turbo_mode = 0,
	.single_channel = 1, /* 0: slave, 1: master */
};

static struct spi_board_info ldp_spi_board_info[] __initdata = {
	[0] = {
		/*
		 * TSC2046 operates at a max freqency of 2MHz, so
		 * operate slightly below at 1.5MHz
		 */
		.modalias = "ads7846",
		.bus_num = 1,
		.chip_select = 0,
		.max_speed_hz = 1500000,
		.controller_data = &tsc2046_mcspi_config,
		.irq = 0, /* filled in from gpio_to_irq() in omap_ldp_init() */
		.platform_data = &tsc2046_config,
	},
};
/*
 * Request a GPMC chip-select for the SMSC911x and fill in its memory
 * window and GPIO interrupt in ldp_smsc911x_resources[].
 * NOTE(review): the GPMC chip-select is not released if the IRQ GPIO
 * request fails below.
 */
static inline void __init ldp_init_smsc911x(void)
{
	int eth_cs;
	unsigned long cs_mem_base;
	int eth_gpio = 0;

	eth_cs = LDP_SMSC911X_CS;

	if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) {
		printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n");
		return;
	}

	/* chip register window: 256 bytes at the start of the CS region */
	ldp_smsc911x_resources[0].start = cs_mem_base + 0x0;
	ldp_smsc911x_resources[0].end = cs_mem_base + 0xff;
	udelay(100); /* presumably settle time after CS setup -- TODO confirm */

	eth_gpio = LDP_SMSC911X_GPIO;

	ldp_smsc911x_resources[1].start = OMAP_GPIO_IRQ(eth_gpio);

	if (gpio_request(eth_gpio, "smsc911x irq") < 0) {
		printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
				eth_gpio);
		return;
	}
	gpio_direction_input(eth_gpio);
}
/* LCD panel device, driven by the internal display controller */
static struct platform_device ldp_lcd_device = {
	.name = "ldp_lcd",
	.id = -1,
};

static struct omap_lcd_config ldp_lcd_config __initdata = {
	.ctrl_name = "internal",
};

/* Board config tags handed to the OMAP core (LCD only) */
static struct omap_board_config_kernel ldp_config[] __initdata = {
	{ OMAP_TAG_LCD, &ldp_lcd_config },
};
/* Early init hook: bring up the common OMAP2+ infrastructure and devices. */
static void __init omap_ldp_init_early(void)
{
	omap2_init_common_infrastructure();
	omap2_init_common_devices(NULL, NULL);
}
/* TWL4030 USB transceiver in ULPI mode */
static struct twl4030_usb_data ldp_usb_data = {
	.usb_mode = T2_USB_MODE_ULPI,
};

/* TWL4030 GPIO block, numbered after the OMAP's own GPIO lines */
static struct twl4030_gpio_platform_data ldp_gpio_data = {
	.gpio_base = OMAP_MAX_GPIO_LINES,
	.irq_base = TWL4030_GPIO_IRQ_BASE,
	.irq_end = TWL4030_GPIO_IRQ_END,
};

static struct twl4030_madc_platform_data ldp_madc_data = {
	.irq_line = 1,
};

/* Consumer mapping for VMMC1; .dev is linked to MMC1 in omap_ldp_init() */
static struct regulator_consumer_supply ldp_vmmc1_supply = {
	.supply = "vmmc",
};

/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data ldp_vmmc1 = {
	.constraints = {
		.min_uV = 1850000,
		.max_uV = 3150000,
		.valid_modes_mask = REGULATOR_MODE_NORMAL
			| REGULATOR_MODE_STANDBY,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
			| REGULATOR_CHANGE_MODE
			| REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldp_vmmc1_supply,
};

/* ads7846 on SPI */
static struct regulator_consumer_supply ldp_vaux1_supplies[] = {
	REGULATOR_SUPPLY("vcc", "spi1.0"),
};

/* VAUX1 */
static struct regulator_init_data ldp_vaux1 = {
	.constraints = {
		.min_uV = 3000000,
		.max_uV = 3000000,
		.apply_uV = true,
		.valid_modes_mask = REGULATOR_MODE_NORMAL
			| REGULATOR_MODE_STANDBY,
		.valid_ops_mask = REGULATOR_CHANGE_MODE
			| REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies = ARRAY_SIZE(ldp_vaux1_supplies),
	.consumer_supplies = ldp_vaux1_supplies,
};

/* Child-device configuration handed to the TWL4030 MFD core */
static struct twl4030_platform_data ldp_twldata = {
	.irq_base = TWL4030_IRQ_BASE,
	.irq_end = TWL4030_IRQ_END,

	/* platform_data for children goes here */
	.madc = &ldp_madc_data,
	.usb = &ldp_usb_data,
	.vmmc1 = &ldp_vmmc1,
	.vaux1 = &ldp_vaux1,
	.gpio = &ldp_gpio_data,
	.keypad = &ldp_kp_twl4030_data,
};

/* TWL4030 PMIC on I2C bus 1 at address 0x48, wakeup-capable system IRQ */
static struct i2c_board_info __initdata ldp_i2c_boardinfo[] = {
	{
		I2C_BOARD_INFO("twl4030", 0x48),
		.flags = I2C_CLIENT_WAKE,
		.irq = INT_34XX_SYS_NIRQ,
		.platform_data = &ldp_twldata,
	},
};
/* Register the three OMAP I2C buses; the TWL4030 PMIC sits on bus 1. */
static int __init omap_i2c_init(void)
{
	omap_register_i2c_bus(1, 2600, ldp_i2c_boardinfo,
			ARRAY_SIZE(ldp_i2c_boardinfo));
	omap_register_i2c_bus(2, 400, NULL, 0);
	omap_register_i2c_bus(3, 400, NULL, 0);
	return 0;
}
/* MMC1: 4-bit bus, no card-detect or write-protect GPIOs */
static struct omap2_hsmmc_info mmc[] __initdata = {
	{
		.mmc = 1,
		.caps = MMC_CAP_4_BIT_DATA,
		.gpio_cd = -EINVAL,
		.gpio_wp = -EINVAL,
	},
	{} /* Terminator */
};

/* Platform devices registered in omap_ldp_init() */
static struct platform_device *ldp_devices[] __initdata = {
	&ldp_smsc911x_device,
	&ldp_lcd_device,
	&ldp_gpio_keys_device,
};

#ifdef CONFIG_OMAP_MUX
/* No board-specific pad overrides; terminator only */
static struct omap_board_mux board_mux[] __initdata = {
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif

/* MUSB OTG controller: ULPI interface, 100 mA VBUS budget */
static struct omap_musb_board_data musb_board_data = {
	.interface_type = MUSB_INTERFACE_ULPI,
	.mode = MUSB_OTG,
	.power = 100,
};

static struct mtd_partition ldp_nand_partitions[] = {
	/* All the partition sizes are listed in terms of NAND block size */
	{
		.name = "X-Loader-NAND",
		.offset = 0,
		.size = 4 * (64 * 2048), /* 512KB, 0x80000 */
		.mask_flags = MTD_WRITEABLE, /* force read-only */
	},
	{
		.name = "U-Boot-NAND",
		.offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
		.size = 10 * (64 * 2048), /* 1.25MB, 0x140000 */
		.mask_flags = MTD_WRITEABLE, /* force read-only */
	},
	{
		.name = "Boot Env-NAND",
		.offset = MTDPART_OFS_APPEND, /* Offset = 0x1c0000 */
		.size = 2 * (64 * 2048), /* 256KB, 0x40000 */
	},
	{
		.name = "Kernel-NAND",
		.offset = MTDPART_OFS_APPEND, /* Offset = 0x0200000*/
		.size = 240 * (64 * 2048), /* 30M, 0x1E00000 */
	},
	{
		.name = "File System - NAND",
		.offset = MTDPART_OFS_APPEND, /* Offset = 0x2000000 */
		.size = MTDPART_SIZ_FULL, /* 96MB, 0x6000000 */
	},
};
/*
 * Board init: pin mux, board config, Ethernet, I2C/PMIC, platform
 * devices, touchscreen SPI, serial, USB, NAND and MMC -- in that order.
 */
static void __init omap_ldp_init(void)
{
	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
	omap_board_config = ldp_config;
	omap_board_config_size = ARRAY_SIZE(ldp_config);
	ldp_init_smsc911x();
	omap_i2c_init();
	platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices));
	ts_gpio = 54; /* ads7846 pen-down interrupt line */
	ldp_spi_board_info[0].irq = gpio_to_irq(ts_gpio);
	spi_register_board_info(ldp_spi_board_info,
				ARRAY_SIZE(ldp_spi_board_info));
	ads7846_dev_init();
	omap_serial_init();
	usb_musb_init(&musb_board_data);
	board_nand_init(ldp_nand_partitions,
		ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0);

	omap2_hsmmc_init(mmc);
	/* link regulators to MMC adapters */
	ldp_vmmc1_supply.dev = mmc[0].dev;
}
/* Machine descriptor for the OMAP LDP development board */
MACHINE_START(OMAP_LDP, "OMAP LDP board")
	.boot_params = 0x80000100,
	.reserve = omap_reserve,
	.map_io = omap3_map_io,
	.init_early = omap_ldp_init_early,
	.init_irq = omap_init_irq,
	.init_machine = omap_ldp_init,
	.timer = &omap_timer,
MACHINE_END
| gpl-2.0 |
apasricha/KVMTrace-kernel-mod | drivers/iio/adc/ad7266.c | 152 | 13553 | /*
* AD7266/65 SPI ADC driver
*
* Copyright 2012 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/platform_data/ad7266.h>
/* Per-device driver state, stored in iio_priv() of the iio_dev */
struct ad7266_state {
	struct spi_device *spi;
	struct regulator *reg; /* optional external vref supply */
	unsigned long vref_uv; /* reference voltage in uV */

	/* wakeup / conversion / powerdown transfers for single reads */
	struct spi_transfer single_xfer[3];
	struct spi_message single_msg;

	enum ad7266_range range;
	enum ad7266_mode mode;
	bool fixed_addr; /* true: address pins hardwired, no GPIOs */
	struct gpio gpios[3]; /* AD0..AD2 channel-select lines */

	/*
	 * DMA (thus cache coherency maintenance) requires the
	 * transfer buffers to live in their own cache lines.
	 * The buffer needs to be large enough to hold two samples (4 bytes) and
	 * the naturally aligned timestamp (8 bytes).
	 */
	uint8_t data[ALIGN(4, sizeof(s64)) + sizeof(s64)] ____cacheline_aligned;
};
/* Take the ADC out of power-down via a dummy SPI read. */
static int ad7266_wakeup(struct ad7266_state *st)
{
	/* Any read with >= 2 bytes will wake the device */
	return spi_read(st->spi, st->data, 2);
}

/* Put the ADC into power-down via a short SPI read. */
static int ad7266_powerdown(struct ad7266_state *st)
{
	/* Any read with < 2 bytes will powerdown the device */
	return spi_read(st->spi, st->data, 1);
}
/*
 * Buffer pre-enable: wake the ADC, then run the core preenable hook;
 * power the ADC back down if the core hook fails.
 */
static int ad7266_preenable(struct iio_dev *indio_dev)
{
	struct ad7266_state *st = iio_priv(indio_dev);
	int ret;

	ret = ad7266_wakeup(st);
	if (ret)
		return ret;

	ret = iio_sw_buffer_preenable(indio_dev);
	if (ret)
		ad7266_powerdown(st);

	return ret;
}

/* Buffer post-disable: return the ADC to power-down. */
static int ad7266_postdisable(struct iio_dev *indio_dev)
{
	struct ad7266_state *st = iio_priv(indio_dev);
	return ad7266_powerdown(st);
}

/* Triggered-buffer setup ops with power management around the core hooks */
static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
	.preenable = &ad7266_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
	.postdisable = &ad7266_postdisable,
};
/*
 * Trigger handler: read the two samples (4 bytes), attach the timestamp
 * and push the scan into the buffer.
 */
static irqreturn_t ad7266_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct ad7266_state *st = iio_priv(indio_dev);
	int ret;

	ret = spi_read(st->spi, st->data, 4);
	if (ret == 0) {
		/* timestamp lands in the 8-byte-aligned slot after the
		 * samples; see the st->data layout comment */
		if (indio_dev->scan_timestamp)
			((s64 *)st->data)[1] = pf->timestamp;
		iio_push_to_buffers(indio_dev, (u8 *)st->data);
	}

	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
/*
 * Drive the AD0..AD2 address GPIOs to select the input for scan slot @nr.
 * The slot number is first remapped according to the conversion mode
 * (per the device's addressing scheme -- see datasheet); no-op when the
 * address pins are hardwired.
 */
static void ad7266_select_input(struct ad7266_state *st, unsigned int nr)
{
	unsigned int i;

	if (st->fixed_addr)
		return;

	switch (st->mode) {
	case AD7266_MODE_SINGLE_ENDED:
		nr >>= 1;
		break;
	case AD7266_MODE_PSEUDO_DIFF:
		nr |= 1;
		break;
	case AD7266_MODE_DIFF:
		nr &= ~1;
		break;
	}

	/* put the 3-bit address on the pins, LSB first */
	for (i = 0; i < 3; ++i)
		gpio_set_value(st->gpios[i].gpio, (bool)(nr & BIT(i)));
}
/* Point the input mux at the first enabled channel of the new scan mask. */
static int ad7266_update_scan_mode(struct iio_dev *indio_dev,
	const unsigned long *scan_mask)
{
	struct ad7266_state *st = iio_priv(indio_dev);
	unsigned int nr = find_first_bit(scan_mask, indio_dev->masklength);

	ad7266_select_input(st, nr);

	return 0;
}
static int ad7266_read_single(struct ad7266_state *st, int *val,
unsigned int address)
{
int ret;
ad7266_select_input(st, address);
ret = spi_sync(st->spi, &st->single_msg);
*val = be16_to_cpu(st->data[address % 2]);
return ret;
}
/* iio read_raw: raw sample, scale and offset reporting. */
static int ad7266_read_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int *val, int *val2, long m)
{
	struct ad7266_state *st = iio_priv(indio_dev);
	unsigned long scale_uv;
	int ret;

	switch (m) {
	case IIO_CHAN_INFO_RAW:
		if (iio_buffer_enabled(indio_dev))
			return -EBUSY;
		ret = ad7266_read_single(st, val, chan->address);
		if (ret)
			return ret;

		/* 12 data bits, shifted left by two in the 16-bit word */
		*val = (*val >> 2) & 0xfff;
		if (chan->scan_type.sign == 's')
			*val = sign_extend32(*val, 11);

		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		/* work in units of vref/100 to keep precision */
		scale_uv = (st->vref_uv * 100);
		if (st->mode == AD7266_MODE_DIFF)
			scale_uv *= 2;
		if (st->range == AD7266_RANGE_2VREF)
			scale_uv *= 2;
		scale_uv >>= chan->scan_type.realbits;
		*val = scale_uv / 100000;
		*val2 = (scale_uv % 100000) * 10;
		return IIO_VAL_INT_PLUS_MICRO;
	case IIO_CHAN_INFO_OFFSET:
		/* mid-scale offset applies to the unipolar 2*VREF range */
		if (st->range == AD7266_RANGE_2VREF &&
			st->mode != AD7266_MODE_DIFF)
			*val = 2048;
		else
			*val = 0;
		return IIO_VAL_INT;
	}
	return -EINVAL;
}
/* Single-ended voltage channel: 12 data bits in a big-endian 16-bit word */
#define AD7266_CHAN(_chan, _sign) { \
	.type = IIO_VOLTAGE, \
	.indexed = 1, \
	.channel = (_chan), \
	.address = (_chan), \
	.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT \
		| IIO_CHAN_INFO_SCALE_SHARED_BIT \
		| IIO_CHAN_INFO_OFFSET_SHARED_BIT, \
	.scan_index = (_chan), \
	.scan_type = { \
		.sign = (_sign), \
		.realbits = 12, \
		.storagebits = 16, \
		.shift = 2, \
		.endianness = IIO_BE, \
	}, \
}

/* 12 single-ended channels plus soft timestamp (GPIO-addressed variant) */
#define AD7266_DECLARE_SINGLE_ENDED_CHANNELS(_name, _sign) \
const struct iio_chan_spec ad7266_channels_##_name[] = { \
	AD7266_CHAN(0, (_sign)), \
	AD7266_CHAN(1, (_sign)), \
	AD7266_CHAN(2, (_sign)), \
	AD7266_CHAN(3, (_sign)), \
	AD7266_CHAN(4, (_sign)), \
	AD7266_CHAN(5, (_sign)), \
	AD7266_CHAN(6, (_sign)), \
	AD7266_CHAN(7, (_sign)), \
	AD7266_CHAN(8, (_sign)), \
	AD7266_CHAN(9, (_sign)), \
	AD7266_CHAN(10, (_sign)), \
	AD7266_CHAN(11, (_sign)), \
	IIO_CHAN_SOFT_TIMESTAMP(13), \
}

/* Two single-ended channels plus timestamp (fixed-address variant) */
#define AD7266_DECLARE_SINGLE_ENDED_CHANNELS_FIXED(_name, _sign) \
const struct iio_chan_spec ad7266_channels_##_name##_fixed[] = { \
	AD7266_CHAN(0, (_sign)), \
	AD7266_CHAN(1, (_sign)), \
	IIO_CHAN_SOFT_TIMESTAMP(2), \
}

/* Instantiate unsigned ('u') and signed ('s') channel tables */
static AD7266_DECLARE_SINGLE_ENDED_CHANNELS(u, 'u');
static AD7266_DECLARE_SINGLE_ENDED_CHANNELS(s, 's');
static AD7266_DECLARE_SINGLE_ENDED_CHANNELS_FIXED(u, 'u');
static AD7266_DECLARE_SINGLE_ENDED_CHANNELS_FIXED(s, 's');
/* Differential channel pair (2*_chan vs 2*_chan+1), same 12-bit layout */
#define AD7266_CHAN_DIFF(_chan, _sign) { \
	.type = IIO_VOLTAGE, \
	.indexed = 1, \
	.channel = (_chan) * 2, \
	.channel2 = (_chan) * 2 + 1, \
	.address = (_chan), \
	.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT \
		| IIO_CHAN_INFO_SCALE_SHARED_BIT \
		| IIO_CHAN_INFO_OFFSET_SHARED_BIT, \
	.scan_index = (_chan), \
	.scan_type = { \
		.sign = _sign, \
		.realbits = 12, \
		.storagebits = 16, \
		.shift = 2, \
		.endianness = IIO_BE, \
	}, \
	.differential = 1, \
}

/* Six differential pairs plus timestamp (GPIO-addressed variant) */
#define AD7266_DECLARE_DIFF_CHANNELS(_name, _sign) \
const struct iio_chan_spec ad7266_channels_diff_##_name[] = { \
	AD7266_CHAN_DIFF(0, (_sign)), \
	AD7266_CHAN_DIFF(1, (_sign)), \
	AD7266_CHAN_DIFF(2, (_sign)), \
	AD7266_CHAN_DIFF(3, (_sign)), \
	AD7266_CHAN_DIFF(4, (_sign)), \
	AD7266_CHAN_DIFF(5, (_sign)), \
	IIO_CHAN_SOFT_TIMESTAMP(6), \
}

static AD7266_DECLARE_DIFF_CHANNELS(s, 's');
static AD7266_DECLARE_DIFF_CHANNELS(u, 'u');

/* Two differential pairs plus timestamp (fixed-address variant) */
#define AD7266_DECLARE_DIFF_CHANNELS_FIXED(_name, _sign) \
const struct iio_chan_spec ad7266_channels_diff_fixed_##_name[] = { \
	AD7266_CHAN_DIFF(0, (_sign)), \
	AD7266_CHAN_DIFF(1, (_sign)), \
	IIO_CHAN_SOFT_TIMESTAMP(2), \
}

static AD7266_DECLARE_DIFF_CHANNELS_FIXED(s, 's');
static AD7266_DECLARE_DIFF_CHANNELS_FIXED(u, 'u');
static const struct iio_info ad7266_info = {
	.read_raw = &ad7266_read_raw,
	.update_scan_mode = &ad7266_update_scan_mode,
	.driver_module = THIS_MODULE,
};

/*
 * Valid scan masks: exactly one adjacent channel pair may be enabled at
 * a time (the device converts two inputs per transfer).
 */
static unsigned long ad7266_available_scan_masks[] = {
	0x003,
	0x00c,
	0x030,
	0x0c0,
	0x300,
	0xc00,
	0x000,
};

static unsigned long ad7266_available_scan_masks_diff[] = {
	0x003,
	0x00c,
	0x030,
	0x000,
};

static unsigned long ad7266_available_scan_masks_fixed[] = {
	0x003,
	0x000,
};

/* Channel table plus matching scan masks for one device configuration */
struct ad7266_chan_info {
	const struct iio_chan_spec *channels;
	unsigned int num_channels;
	unsigned long *scan_masks;
};
/* Table index packing: bit2 = differential, bit1 = signed, bit0 = fixed */
#define AD7266_CHAN_INFO_INDEX(_differential, _signed, _fixed) \
	(((_differential) << 2) | ((_signed) << 1) | ((_fixed) << 0))

/* All eight (differential, signed, fixed-address) configurations */
static const struct ad7266_chan_info ad7266_chan_infos[] = {
	[AD7266_CHAN_INFO_INDEX(0, 0, 0)] = {
		.channels = ad7266_channels_u,
		.num_channels = ARRAY_SIZE(ad7266_channels_u),
		.scan_masks = ad7266_available_scan_masks,
	},
	[AD7266_CHAN_INFO_INDEX(0, 0, 1)] = {
		.channels = ad7266_channels_u_fixed,
		.num_channels = ARRAY_SIZE(ad7266_channels_u_fixed),
		.scan_masks = ad7266_available_scan_masks_fixed,
	},
	[AD7266_CHAN_INFO_INDEX(0, 1, 0)] = {
		.channels = ad7266_channels_s,
		.num_channels = ARRAY_SIZE(ad7266_channels_s),
		.scan_masks = ad7266_available_scan_masks,
	},
	[AD7266_CHAN_INFO_INDEX(0, 1, 1)] = {
		.channels = ad7266_channels_s_fixed,
		.num_channels = ARRAY_SIZE(ad7266_channels_s_fixed),
		.scan_masks = ad7266_available_scan_masks_fixed,
	},
	[AD7266_CHAN_INFO_INDEX(1, 0, 0)] = {
		.channels = ad7266_channels_diff_u,
		.num_channels = ARRAY_SIZE(ad7266_channels_diff_u),
		.scan_masks = ad7266_available_scan_masks_diff,
	},
	[AD7266_CHAN_INFO_INDEX(1, 0, 1)] = {
		.channels = ad7266_channels_diff_fixed_u,
		.num_channels = ARRAY_SIZE(ad7266_channels_diff_fixed_u),
		.scan_masks = ad7266_available_scan_masks_fixed,
	},
	[AD7266_CHAN_INFO_INDEX(1, 1, 0)] = {
		.channels = ad7266_channels_diff_s,
		.num_channels = ARRAY_SIZE(ad7266_channels_diff_s),
		.scan_masks = ad7266_available_scan_masks_diff,
	},
	[AD7266_CHAN_INFO_INDEX(1, 1, 1)] = {
		.channels = ad7266_channels_diff_fixed_s,
		.num_channels = ARRAY_SIZE(ad7266_channels_diff_fixed_s),
		.scan_masks = ad7266_available_scan_masks_fixed,
	},
};
/*
 * Select the channel spec set matching the configured mode, range and
 * addressing scheme, and hook it up to the iio_dev.
 */
static void ad7266_init_channels(struct iio_dev *indio_dev)
{
	struct ad7266_state *st = iio_priv(indio_dev);
	const struct ad7266_chan_info *chan_info;
	bool diff, sign;

	diff = st->mode != AD7266_MODE_SINGLE_ENDED;
	/* bipolar coding for the 2*VREF range and for differential mode */
	sign = (st->range == AD7266_RANGE_2VREF) ||
		(st->mode == AD7266_MODE_DIFF);

	chan_info = &ad7266_chan_infos[AD7266_CHAN_INFO_INDEX(diff, sign,
		st->fixed_addr)];

	indio_dev->channels = chan_info->channels;
	indio_dev->num_channels = chan_info->num_channels;
	indio_dev->available_scan_masks = chan_info->scan_masks;
	indio_dev->masklength = chan_info->num_channels - 1;
}
/* Labels for the three channel-address GPIOs */
static const char * const ad7266_gpio_labels[] = {
	"AD0", "AD1", "AD2",
};
/*
 * Probe: set up the optional vref regulator, the address GPIOs, the
 * channel tables and the wakeup/convert/powerdown SPI message, then
 * register the triggered buffer and the IIO device.
 * NOTE(review): any regulator_get() error (including probe deferral)
 * silently falls back to the internal 2.5V reference -- confirm this is
 * intended.
 */
static int ad7266_probe(struct spi_device *spi)
{
	struct ad7266_platform_data *pdata = spi->dev.platform_data;
	struct iio_dev *indio_dev;
	struct ad7266_state *st;
	unsigned int i;
	int ret;

	indio_dev = iio_device_alloc(sizeof(*st));
	if (indio_dev == NULL)
		return -ENOMEM;

	st = iio_priv(indio_dev);

	st->reg = regulator_get(&spi->dev, "vref");
	if (!IS_ERR_OR_NULL(st->reg)) {
		ret = regulator_enable(st->reg);
		if (ret)
			goto error_put_reg;

		ret = regulator_get_voltage(st->reg);
		if (ret < 0)
			goto error_disable_reg;

		st->vref_uv = ret;
	} else {
		/* Use internal reference */
		st->vref_uv = 2500000;
	}

	if (pdata) {
		st->fixed_addr = pdata->fixed_addr;
		st->mode = pdata->mode;
		st->range = pdata->range;

		/* claim the AD0..AD2 select lines, driven low initially */
		if (!st->fixed_addr) {
			for (i = 0; i < ARRAY_SIZE(st->gpios); ++i) {
				st->gpios[i].gpio = pdata->addr_gpios[i];
				st->gpios[i].flags = GPIOF_OUT_INIT_LOW;
				st->gpios[i].label = ad7266_gpio_labels[i];
			}
			ret = gpio_request_array(st->gpios,
				ARRAY_SIZE(st->gpios));
			if (ret)
				goto error_disable_reg;
		}
	} else {
		/* defaults when no platform data is provided */
		st->fixed_addr = true;
		st->range = AD7266_RANGE_VREF;
		st->mode = AD7266_MODE_DIFF;
	}

	spi_set_drvdata(spi, indio_dev);
	st->spi = spi;

	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &ad7266_info;

	ad7266_init_channels(indio_dev);

	/* wakeup */
	st->single_xfer[0].rx_buf = &st->data;
	st->single_xfer[0].len = 2;
	st->single_xfer[0].cs_change = 1;
	/* conversion */
	st->single_xfer[1].rx_buf = &st->data;
	st->single_xfer[1].len = 4;
	st->single_xfer[1].cs_change = 1;
	/* powerdown */
	st->single_xfer[2].tx_buf = &st->data;
	st->single_xfer[2].len = 1;

	spi_message_init(&st->single_msg);
	spi_message_add_tail(&st->single_xfer[0], &st->single_msg);
	spi_message_add_tail(&st->single_xfer[1], &st->single_msg);
	spi_message_add_tail(&st->single_xfer[2], &st->single_msg);

	ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
		&ad7266_trigger_handler, &iio_triggered_buffer_setup_ops);
	if (ret)
		goto error_free_gpios;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_buffer_cleanup;

	return 0;

/* unwind in reverse acquisition order */
error_buffer_cleanup:
	iio_triggered_buffer_cleanup(indio_dev);
error_free_gpios:
	if (!st->fixed_addr)
		gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
error_disable_reg:
	if (!IS_ERR_OR_NULL(st->reg))
		regulator_disable(st->reg);
error_put_reg:
	if (!IS_ERR_OR_NULL(st->reg))
		regulator_put(st->reg);

	iio_device_free(indio_dev);

	return ret;
}
/* Remove: tear down in reverse order of ad7266_probe(). */
static int ad7266_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad7266_state *st = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	iio_triggered_buffer_cleanup(indio_dev);
	if (!st->fixed_addr)
		gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
	if (!IS_ERR_OR_NULL(st->reg)) {
		regulator_disable(st->reg);
		regulator_put(st->reg);
	}
	iio_device_free(indio_dev);

	return 0;
}
/* Both supported parts share the same driver behavior */
static const struct spi_device_id ad7266_id[] = {
	{"ad7265", 0},
	{"ad7266", 0},
	{ }
};
MODULE_DEVICE_TABLE(spi, ad7266_id);

static struct spi_driver ad7266_driver = {
	.driver = {
		.name = "ad7266",
		.owner = THIS_MODULE,
	},
	.probe = ad7266_probe,
	.remove = ad7266_remove,
	.id_table = ad7266_id,
};
module_spi_driver(ad7266_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD7266/65 ADC");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
NStep/nx_bullhead | arch/mips/mm/mmap.c | 152 | 4798 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2011 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>
/* Cache-colouring mask for shared mappings; PAGE_SIZE - 1 == no aliasing */
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
/*
 * Legacy (bottom-up) mmap layout is used for ADDR_COMPAT_LAYOUT tasks,
 * tasks with an unlimited stack, or when the legacy sysctl is set.
 */
static int mmap_is_legacy(void)
{
	if ((current->personality & ADDR_COMPAT_LAYOUT) ||
	    rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
/*
 * mmap base for the top-down layout: leave a stack-sized gap, clamped
 * to [MIN_GAP, MAX_GAP], plus the randomization offset below TASK_SIZE.
 */
static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
/* Round @addr up to the cache colour implied by @pgoff */
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + shm_align_mask) & ~shm_align_mask) + \
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

/* Search direction for the unmapped-area walk */
enum mmap_allocation_direction {UP, DOWN};
/*
 * Shared bottom-up/top-down implementation of get_unmapped_area,
 * honouring the cache-colouring constraint (shm_align_mask) for
 * file-backed and shared mappings.
 */
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	/* colour-align file-backed and shared mappings */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		/* honour the hint if the range is free and in bounds */
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		/* page-aligned result means success, not an error code */
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	/* bottom-up search (also the fallback for a failed top-down walk) */
	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}
/* Bottom-up unmapped-area lookup; see arch_get_unmapped_area_common(). */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags,
		UP);
}
/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp, addr0, len, pgoff, flags,
		DOWN);
}
/*
 * Choose bottom-up or top-down mmap layout for a new mm, and compute
 * the ASLR offset applied to the mmap base.
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_long();
		random_factor = random_factor << PAGE_SHIFT;
		/* clamp the entropy range; 32-bit tasks get less */
		if (TASK_IS_32BIT_ADDR)
			random_factor &= 0xfffffful;
		else
			random_factor &= 0xffffffful;
	}

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
/* Random page-aligned offset added to the brk base. */
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = get_random_long() << PAGE_SHIFT;

	/* 8MB for 32bit, 256MB for 64bit */
	if (TASK_IS_32BIT_ADDR)
		return rnd & 0x7ffffful;

	return rnd & 0xffffffful;
}
/*
 * Return a randomized, page-aligned brk above mm->brk; never returns a
 * value below the current brk (guards against arithmetic wrap).
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	return (ret < mm->brk) ? mm->brk : ret;
}
/* A kernel virtual address is valid iff it maps to a valid page frame. */
int __virt_addr_valid(const volatile void *kaddr)
{
	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
| gpl-2.0 |
AnesHadzi/linux-socfpga | drivers/net/wireless/rsi/rsi_91x_usb.c | 1688 | 15015 | /**
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#include <linux/module.h>
#include "rsi_usb.h"
/**
 * rsi_usb_card_write() - This function writes to the USB Card.
 * @adapter: Pointer to the adapter structure.
 * @buf: Pointer to the buffer from where the data has to be taken.
 * @len: Length to be written.
 * @endpoint: Type of endpoint (1-based index into bulkout_endpoint_addr).
 *
 * Sets dev->write_fail on error so later writes are silently dropped.
 *
 * Return: status: 0 on success, a negative error code on failure.
 */
static int rsi_usb_card_write(struct rsi_hw *adapter,
			      void *buf,
			      u16 len,
			      u8 endpoint)
{
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
	int status;
	s32 transfer; /* actual byte count; not propagated to callers */

	/* NOTE(review): usb_bulk_msg() timeout is in milliseconds on
	 * current kernels -- HZ * 5 may not be 5 seconds; confirm. */
	status = usb_bulk_msg(dev->usbdev,
			      usb_sndbulkpipe(dev->usbdev,
			      dev->bulkout_endpoint_addr[endpoint - 1]),
			      buf,
			      len,
			      &transfer,
			      HZ * 5);
	if (status < 0) {
		rsi_dbg(ERR_ZONE,
			"Card write failed with error code :%10d\n", status);
		dev->write_fail = 1;
	}
	return status;
}
/**
 * rsi_write_multiple() - This function writes multiple bytes of information
 *			  to the USB card.
 * @adapter: Pointer to the adapter structure.
 * @endpoint: Endpoint to write to (MGMT_EP or a data endpoint).
 * @data: Pointer to the data that has to be written.
 * @count: Number of multiple bytes to be written.
 *
 * Return: 0 on success, a negative error code on failure.
 */
static int rsi_write_multiple(struct rsi_hw *adapter,
			      u8 endpoint,
			      u8 *data,
			      u32 count)
{
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
	u8 *seg = dev->tx_buffer;

	/* a previous bulk write failed; drop silently */
	if (dev->write_fail)
		return 0;

	if (endpoint == MGMT_EP) {
		/* management frames are staged in the driver's tx_buffer
		 * behind a zeroed headroom */
		memset(seg, 0, RSI_USB_TX_HEAD_ROOM);
		memcpy(seg + RSI_USB_TX_HEAD_ROOM, data, count);
	} else {
		/* assumes the caller reserved RSI_USB_TX_HEAD_ROOM bytes
		 * in front of @data -- TODO confirm against callers */
		seg = ((u8 *)data - RSI_USB_TX_HEAD_ROOM);
	}

	return rsi_usb_card_write(adapter,
				  seg,
				  count + RSI_USB_TX_HEAD_ROOM,
				  endpoint);
}
/**
 * rsi_find_bulk_in_and_out_endpoints() - This function initializes the bulk
 *					  endpoints to the device.
 * @interface: Pointer to the USB interface structure.
 * @adapter: Pointer to the adapter structure.
 *
 * Records the first bulk-IN endpoint and up to MAX_BULK_EP bulk-OUT
 * endpoints of altsetting 0.
 *
 * Return: 0 on success, -EINVAL when no bulk-IN endpoint was found.
 */
static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
					      struct rsi_hw *adapter)
{
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	/* NOTE(review): wMaxPacketSize is little-endian on the wire and is
	 * stored here without le16_to_cpu() -- confirm on BE hosts */
	__le16 buffer_size;
	int ii, bep_found = 0;

	iface_desc = &(interface->altsetting[0]);

	for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
		endpoint = &(iface_desc->endpoint[ii].desc);

		/* first bulk-IN endpoint wins */
		if ((!(dev->bulkin_endpoint_addr)) &&
		    (endpoint->bEndpointAddress & USB_DIR_IN) &&
		    ((endpoint->bmAttributes &
		    USB_ENDPOINT_XFERTYPE_MASK) ==
		    USB_ENDPOINT_XFER_BULK)) {
			buffer_size = endpoint->wMaxPacketSize;
			dev->bulkin_size = buffer_size;
			dev->bulkin_endpoint_addr =
				endpoint->bEndpointAddress;
		}

		/* collect bulk-OUT endpoints in discovery order */
		if (!dev->bulkout_endpoint_addr[bep_found] &&
		    !(endpoint->bEndpointAddress & USB_DIR_IN) &&
		    ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
		      USB_ENDPOINT_XFER_BULK)) {
			dev->bulkout_endpoint_addr[bep_found] =
				endpoint->bEndpointAddress;
			buffer_size = endpoint->wMaxPacketSize;
			dev->bulkout_size[bep_found] = buffer_size;
			bep_found++;
		}

		if (bep_found >= MAX_BULK_EP)
			break;
	}

	/* NOTE(review): this only rejects "no IN endpoint but an OUT one";
	 * a device with neither passes -- confirm this is intended */
	if (!(dev->bulkin_endpoint_addr) &&
	    (dev->bulkout_endpoint_addr[0]))
		return -EINVAL;

	return 0;
}
/* rsi_usb_reg_read() - This function reads data from given register address.
 * @usbdev: Pointer to the usb_device structure.
 * @reg: Address of the register to be read.
 * @value: Value to be read (written only on success).
 * @len: length of data to be read (at most 4 bytes).
 *
 * Return: status: >= 0 on success, a negative error code on failure.
 */
static int rsi_usb_reg_read(struct usb_device *usbdev,
			    u32 reg,
			    u16 *value,
			    u16 len)
{
	u8 *buf;
	int status;

	/*
	 * The transfer buffer below is only 4 bytes long; reject lengths
	 * that would let the control transfer overrun the allocation.
	 */
	if (len > 4)
		return -EINVAL;

	buf = kmalloc(0x04, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	status = usb_control_msg(usbdev,
				 usb_rcvctrlpipe(usbdev, 0),
				 USB_VENDOR_REGISTER_READ,
				 USB_TYPE_VENDOR,
				 ((reg & 0xffff0000) >> 16), (reg & 0xffff),
				 (void *)buf,
				 len,
				 HZ * 5);
	if (status < 0) {
		rsi_dbg(ERR_ZONE,
			"%s: Reg read failed with error code :%d\n",
			__func__, status);
		kfree(buf);
		return status;
	}

	/*
	 * Compose the little-endian result only after a successful
	 * transfer; the previous code read uninitialized heap memory
	 * into *value on failure.
	 */
	*value = (buf[0] | (buf[1] << 8));

	kfree(buf);
	return status;
}
/**
 * rsi_usb_reg_write() - This function writes the given data into the given
 *			 register address.
 * @usbdev: Pointer to the usb_device structure.
 * @reg: Address of the register.
 * @value: Value to write.
 * @len: Length of data to be written; must not exceed the 4-byte buffer.
 *
 * Return: status: 0 on success, a negative error code on failure.
 */
static int rsi_usb_reg_write(struct usb_device *usbdev,
			     u32 reg,
			     u16 value,
			     u16 len)
{
	u8 *usb_reg_buf;
	int status;

	/* Only 4 bytes are staged below; a larger len would make the
	 * controller read past the end of the allocation. */
	if (len > 4)
		return -EINVAL;

	usb_reg_buf = kmalloc(0x04, GFP_KERNEL);
	if (!usb_reg_buf)
		return -ENOMEM;

	/* Little-endian 16-bit value, upper two bytes zero. */
	usb_reg_buf[0] = (value & 0x00ff);
	usb_reg_buf[1] = (value & 0xff00) >> 8;
	usb_reg_buf[2] = 0x0;
	usb_reg_buf[3] = 0x0;

	status = usb_control_msg(usbdev,
				 usb_sndctrlpipe(usbdev, 0),
				 USB_VENDOR_REGISTER_WRITE,
				 USB_TYPE_VENDOR,
				 ((reg & 0xffff0000) >> 16),
				 (reg & 0xffff),
				 (void *)usb_reg_buf,
				 len,
				 HZ * 5);
	if (status < 0) {
		rsi_dbg(ERR_ZONE,
			"%s: Reg write failed with error code :%d\n",
			__func__, status);
	}
	kfree(usb_reg_buf);

	/* Normalize usb_control_msg()'s positive byte count to 0 so the
	 * documented 0-on-success contract holds for callers. */
	return (status < 0) ? status : 0;
}
/**
 * rsi_rx_done_handler() - Completion callback for the receive URB.
 * @urb: Completed URB.
 *
 * On successful completion, wakes the RX thread to process the data;
 * errored URBs are silently ignored.
 *
 * Return: None.
 */
static void rsi_rx_done_handler(struct urb *urb)
{
	struct rsi_hw *adapter = urb->context;
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;

	if (!urb->status)
		rsi_set_event(&dev->rx_thread.event);
}
/**
 * rsi_rx_urb_submit() - Queue the driver's single receive URB on the
 *			 bulk-IN pipe.
 * @adapter: Pointer to the adapter structure.
 *
 * Return: 0 on success, a negative error code on failure.
 */
static int rsi_rx_urb_submit(struct rsi_hw *adapter)
{
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
	struct urb *rx_urb = dev->rx_usb_urb[0];
	int ret;

	usb_fill_bulk_urb(rx_urb,
			  dev->usbdev,
			  usb_rcvbulkpipe(dev->usbdev,
					  dev->bulkin_endpoint_addr),
			  rx_urb->transfer_buffer,
			  3000, /* transfer buffer length, in bytes */
			  rsi_rx_done_handler,
			  adapter);

	ret = usb_submit_urb(rx_urb, GFP_KERNEL);
	if (ret)
		rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__);

	return ret;
}
/**
 * rsi_usb_write_register_multiple() - This function writes multiple bytes of
 *				       information to multiple registers.
 * @adapter: Pointer to the adapter structure.
 * @addr: Address of the register.
 * @data: Pointer to the data that has to be written.
 * @count: Number of multiple bytes to be written on to the registers.
 *
 * Return: status: 0 on success, a negative error code on failure.
 */
int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
				    u32 addr,
				    u8 *data,
				    u32 count)
{
	struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
	u8 *buf;
	u32 transfer;
	int status = 0;

	buf = kzalloc(4096, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (count) {
		/* The chunk size must be u32: the previous u8 variable
		 * truncated min(count, 4096) mod 256, so a count that is a
		 * multiple of 256 produced a zero-length transfer and an
		 * infinite loop. */
		transfer = min_t(u32, count, 4096);
		memcpy(buf, data, transfer);
		status = usb_control_msg(dev->usbdev,
					 usb_sndctrlpipe(dev->usbdev, 0),
					 USB_VENDOR_REGISTER_WRITE,
					 USB_TYPE_VENDOR,
					 ((addr & 0xffff0000) >> 16),
					 (addr & 0xffff),
					 (void *)buf,
					 transfer,
					 HZ * 5);
		if (status < 0) {
			rsi_dbg(ERR_ZONE,
				"Reg write failed with error code :%d\n",
				status);
			/* Bail out instead of retrying the same failing
			 * chunk forever, and report the error. */
			break;
		}
		count -= transfer;
		data += transfer;
		addr += transfer;
	}

	kfree(buf);
	return (status < 0) ? status : 0;
}
/**
 * rsi_usb_host_intf_write_pkt() - Write a frame to the USB card.
 * @adapter: Pointer to the adapter structure.
 * @pkt: Pointer to the data to be written on to the card.
 * @len: Length of the data to be written on to the card.
 *
 * Management frames go out on MGMT_EP, everything else on DATA_EP; the
 * queue number lives in the high nibble of the second descriptor byte.
 *
 * Return: 0 on success, a negative error code on failure.
 */
static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter,
				       u8 *pkt,
				       u32 len)
{
	u32 queueno = ((pkt[1] >> 4) & 0xf);
	u8 endpoint = (queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP;

	return rsi_write_multiple(adapter, endpoint, pkt, len);
}
/**
 * rsi_deinit_usb_interface() - Tear down the USB transport resources.
 * @adapter: Pointer to the adapter structure.
 *
 * Return: None.
 */
static void rsi_deinit_usb_interface(struct rsi_hw *adapter)
{
	struct rsi_91x_usbdev *usb_dev =
		(struct rsi_91x_usbdev *)adapter->rsi_dev;

	/* Stop the RX thread first; it uses the URB and buffers below. */
	rsi_kill_thread(&usb_dev->rx_thread);
	usb_free_urb(usb_dev->rx_usb_urb[0]);
	kfree(adapter->priv->rx_data_pkt);
	kfree(usb_dev->tx_buffer);
	/* NOTE(review): adapter->rsi_dev itself is not freed here — confirm
	 * its ownership/lifetime lies with the caller. */
}
/**
 * rsi_init_usb_interface() - This function initializes the usb interface.
 * @adapter: Pointer to the adapter structure.
 * @pfunction: Pointer to USB interface structure.
 *
 * Allocates the per-device USB state, discovers bulk endpoints, allocates
 * RX/TX buffers and the RX URB, installs transport callbacks, and starts
 * the RX thread.  All allocations are released on failure.
 *
 * Return: 0 on success, a negative error code on failure.
 */
static int rsi_init_usb_interface(struct rsi_hw *adapter,
				  struct usb_interface *pfunction)
{
	struct rsi_91x_usbdev *rsi_dev;
	struct rsi_common *common = adapter->priv;
	int status;

	rsi_dev = kzalloc(sizeof(*rsi_dev), GFP_KERNEL);
	if (!rsi_dev)
		return -ENOMEM;

	adapter->rsi_dev = rsi_dev;
	rsi_dev->usbdev = interface_to_usbdev(pfunction);

	if (rsi_find_bulk_in_and_out_endpoints(pfunction, adapter)) {
		status = -EINVAL;
		goto fail_dev;
	}

	adapter->device = &pfunction->dev;
	usb_set_intfdata(pfunction, adapter);

	common->rx_data_pkt = kmalloc(2048, GFP_KERNEL);
	if (!common->rx_data_pkt) {
		rsi_dbg(ERR_ZONE, "%s: Failed to allocate memory\n",
			__func__);
		status = -ENOMEM;
		goto fail_dev;
	}

	rsi_dev->tx_buffer = kmalloc(2048, GFP_KERNEL);
	if (!rsi_dev->tx_buffer) {
		status = -ENOMEM;
		goto fail_tx;
	}

	rsi_dev->rx_usb_urb[0] = usb_alloc_urb(0, GFP_KERNEL);
	if (!rsi_dev->rx_usb_urb[0]) {
		status = -ENOMEM;
		goto fail_rx;
	}
	rsi_dev->rx_usb_urb[0]->transfer_buffer = adapter->priv->rx_data_pkt;
	rsi_dev->tx_blk_size = 252;

	/* Initializing function callbacks */
	adapter->rx_urb_submit = rsi_rx_urb_submit;
	adapter->host_intf_write_pkt = rsi_usb_host_intf_write_pkt;
	adapter->check_hw_queue_status = rsi_usb_check_queue_status;
	adapter->determine_event_timeout = rsi_usb_event_timeout;

	rsi_init_event(&rsi_dev->rx_thread.event);
	status = rsi_create_kthread(common, &rsi_dev->rx_thread,
				    rsi_usb_rx_thread, "RX-Thread");
	if (status) {
		rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__);
		goto fail_thread;
	}

#ifdef CONFIG_RSI_DEBUGFS
	/* In USB, one less than the MAX_DEBUGFS_ENTRIES entries is required */
	adapter->num_debugfs_entries = (MAX_DEBUGFS_ENTRIES - 1);
#endif

	rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
	return 0;

fail_thread:
	usb_free_urb(rsi_dev->rx_usb_urb[0]);
fail_rx:
	kfree(rsi_dev->tx_buffer);
fail_tx:
	kfree(common->rx_data_pkt);
fail_dev:
	/* Previously rsi_dev itself leaked on every error path. */
	kfree(rsi_dev);
	adapter->rsi_dev = NULL;
	return status;
}
/**
 * rsi_probe() - This function is called by kernel when the driver provided
 *		 Vendor and device IDs are matched. All the initialization
 *		 work is done here.
 * @pfunction: Pointer to the USB interface structure.
 * @id: Pointer to the usb_device_id structure.
 *
 * Return: 0 on success, a negative error code on failure.
 */
static int rsi_probe(struct usb_interface *pfunction,
		     const struct usb_device_id *id)
{
	struct rsi_hw *adapter;
	struct rsi_91x_usbdev *dev;
	u16 fw_status;
	int status;

	rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);

	/* Allocate and initialize the common driver state. */
	adapter = rsi_91x_init();
	if (!adapter) {
		rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
			__func__);
		return -ENOMEM;
	}

	status = rsi_init_usb_interface(adapter, pfunction);
	if (status) {
		rsi_dbg(ERR_ZONE, "%s: Failed to init usb interface\n",
			__func__);
		goto err;
	}

	rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);

	dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;

	/* Bit 0 of FW_STATUS_REG indicates whether firmware is already
	 * running on the device.
	 * NOTE(review): rsi_usb_reg_read() forwards usb_control_msg()'s
	 * return value, which is positive (bytes transferred) on success —
	 * confirm the `if (status)` check below does not treat a
	 * successful read as a failure. */
	status = rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2);
	if (status)
		goto err1;
	else
		fw_status &= 1;

	if (!fw_status) {
		/* Firmware not yet loaded: load it and signal readiness. */
		status = rsi_usb_device_init(adapter->priv);
		if (status) {
			rsi_dbg(ERR_ZONE, "%s: Failed in device init\n",
				__func__);
			goto err1;
		}

		status = rsi_usb_reg_write(dev->usbdev,
					   USB_INTERNAL_REG_1,
					   RSI_USB_READY_MAGIC_NUM, 1);
		if (status)
			goto err1;
		rsi_dbg(INIT_ZONE, "%s: Performed device init\n", __func__);
	}

	/* Kick off reception. */
	status = rsi_rx_urb_submit(adapter);
	if (status)
		goto err1;

	return 0;
err1:
	rsi_deinit_usb_interface(adapter);
err:
	rsi_91x_deinit(adapter);
	rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
	return status;
}
/**
 * rsi_disconnect() - This function performs the reverse of the probe
 *		      function, it deinitializes the driver structure.
 * @pfunction: Pointer to the USB interface structure.
 *
 * Return: None.
 */
static void rsi_disconnect(struct usb_interface *pfunction)
{
	struct rsi_hw *adapter = usb_get_intfdata(pfunction);

	if (!adapter)
		return;

	/* Detach from mac80211 before freeing the transport resources. */
	rsi_mac80211_detach(adapter);
	rsi_deinit_usb_interface(adapter);
	rsi_91x_deinit(adapter);

	rsi_dbg(INFO_ZONE, "%s: Deinitialization completed\n", __func__);
}
#ifdef CONFIG_PM
/* PM suspend hook: power management is not supported, so refuse the
 * suspend request with -ENOSYS. */
static int rsi_suspend(struct usb_interface *intf, pm_message_t message)
{
	/* Not yet implemented */
	return -ENOSYS;
}

/* PM resume hook: counterpart of rsi_suspend(); also unimplemented. */
static int rsi_resume(struct usb_interface *intf)
{
	/* Not yet implemented */
	return -ENOSYS;
}
#endif
/* USB vendor/product IDs claimed by this driver.  NOTE(review): vendor
 * identities are assumed from the raw IDs — confirm against the RSI-91x
 * product documentation.  The table is zero-terminated. */
static const struct usb_device_id rsi_dev_table[] = {
	{ USB_DEVICE(0x0303, 0x0100) },
	{ USB_DEVICE(0x041B, 0x0301) },
	{ USB_DEVICE(0x041B, 0x0201) },
	{ USB_DEVICE(0x041B, 0x9330) },
	{ /* terminating entry */},
};
/* USB driver descriptor tying the probe/disconnect (and, when CONFIG_PM is
 * set, suspend/resume) callbacks to the device ID table above. */
static struct usb_driver rsi_driver = {
	.name = "RSI-USB WLAN",
	.probe = rsi_probe,
	.disconnect = rsi_disconnect,
	.id_table = rsi_dev_table,
#ifdef CONFIG_PM
	.suspend = rsi_suspend,
	.resume = rsi_resume,
#endif
};
module_usb_driver(rsi_driver);
MODULE_AUTHOR("Redpine Signals Inc");
MODULE_DESCRIPTION("Common USB layer for RSI drivers");
MODULE_SUPPORTED_DEVICE("RSI-91x");
MODULE_DEVICE_TABLE(usb, rsi_dev_table);
MODULE_FIRMWARE(FIRMWARE_RSI9113);
MODULE_VERSION("0.1");
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
Pating/linux-2.6.12-rc2 | arch/parisc/math-emu/dfsub.c | 1944 | 15897 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfsub.c $Revision: 1.1 $
*
* Purpose:
* Double_subtract: subtract two double precision values.
*
* External Interfaces:
* dbl_fsub(leftptr, rightptr, dstptr, status)
*
* Internal Interfaces:
*
* Theory:
 *	<<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double_subtract: subtract two double precision values.
*/
/*
 * dbl_fsub() - IEEE double-precision subtract: *dstptr = *leftptr - *rightptr.
 * Handles NaN/infinity operands, denormalized operands, operand swapping,
 * alignment, rounding (per the current rounding mode in *status), and
 * overflow/underflow trap dispatch.  Returns NOEXCEPTION or an exception
 * code (INVALIDEXCEPTION, OVERFLOWEXCEPTION, UNDERFLOWEXCEPTION,
 * INEXACTEXCEPTION, possibly OR-ed).
 */
int
dbl_fsub(
	    dbl_floating_point *leftptr,
	    dbl_floating_point *rightptr,
	    dbl_floating_point *dstptr,
	    unsigned int *status)
    {
    register unsigned int signless_upper_left, signless_upper_right, save;
    register unsigned int leftp1, leftp2, rightp1, rightp2, extent;
    register unsigned int resultp1 = 0, resultp2 = 0;

    register int result_exponent, right_exponent, diff_exponent;
    register int sign_save, jumpsize;
    register boolean inexact = FALSE, underflowtrap;

    /* Create local copies of the numbers */
    Dbl_copyfromptr(leftptr,leftp1,leftp2);
    Dbl_copyfromptr(rightptr,rightp1,rightp2);

    /* A zero "save" helps discover equal operands (for later), *
     * and is used in swapping operands (if needed).             */
    Dbl_xortointp1(leftp1,rightp1,/*to*/save);

    /*
     * check first operand for NaN's or infinity
     */
    if ((result_exponent = Dbl_exponent(leftp1)) == DBL_INFINITY_EXPONENT)
	{
	if (Dbl_iszero_mantissa(leftp1,leftp2))
	    {
	    if (Dbl_isnotnan(rightp1,rightp2))
		{
		if (Dbl_isinfinity(rightp1,rightp2) && save==0)
		    {
		    /*
		     * invalid since operands are same signed infinity's
		     */
		    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
		    Set_invalidflag();
		    Dbl_makequietnan(resultp1,resultp2);
		    Dbl_copytoptr(resultp1,resultp2,dstptr);
		    return(NOEXCEPTION);
		    }
		/*
		 * return infinity
		 */
		Dbl_copytoptr(leftp1,leftp2,dstptr);
		return(NOEXCEPTION);
		}
	    }
	else
	    {
	    /*
	     * is NaN; signaling or quiet?
	     */
	    if (Dbl_isone_signaling(leftp1))
		{
		/* trap if INVALIDTRAP enabled */
		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
		/* make NaN quiet */
		Set_invalidflag();
		Dbl_set_quiet(leftp1);
		}
	    /*
	     * is second operand a signaling NaN?
	     */
	    else if (Dbl_is_signalingnan(rightp1))
		{
		/* trap if INVALIDTRAP enabled */
		if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
		/* make NaN quiet */
		Set_invalidflag();
		Dbl_set_quiet(rightp1);
		Dbl_copytoptr(rightp1,rightp2,dstptr);
		return(NOEXCEPTION);
		}
	    /*
	     * return quiet NaN
	     */
	    Dbl_copytoptr(leftp1,leftp2,dstptr);
	    return(NOEXCEPTION);
	    }
	} /* End left NaN or Infinity processing */
    /*
     * check second operand for NaN's or infinity
     */
    if (Dbl_isinfinity_exponent(rightp1))
	{
	if (Dbl_iszero_mantissa(rightp1,rightp2))
	    {
	    /* return infinity (negated, since this is a subtract) */
	    Dbl_invert_sign(rightp1);
	    Dbl_copytoptr(rightp1,rightp2,dstptr);
	    return(NOEXCEPTION);
	    }
	/*
	 * is NaN; signaling or quiet?
	 */
	if (Dbl_isone_signaling(rightp1))
	    {
	    /* trap if INVALIDTRAP enabled */
	    if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
	    /* make NaN quiet */
	    Set_invalidflag();
	    Dbl_set_quiet(rightp1);
	    }
	/*
	 * return quiet NaN
	 */
	Dbl_copytoptr(rightp1,rightp2,dstptr);
	return(NOEXCEPTION);
	} /* End right NaN or Infinity processing */

    /* Invariant: Must be dealing with finite numbers */

    /* Compare operands by removing the sign */
    Dbl_copytoint_exponentmantissap1(leftp1,signless_upper_left);
    Dbl_copytoint_exponentmantissap1(rightp1,signless_upper_right);

    /* sign difference selects add or sub operation. */
    if(Dbl_ismagnitudeless(leftp2,rightp2,signless_upper_left,signless_upper_right))
	{
	/* Set the left operand to the larger one by XOR swap *
	 * First finish the first word using "save" */
	Dbl_xorfromintp1(save,rightp1,/*to*/rightp1);
	Dbl_xorfromintp1(save,leftp1,/*to*/leftp1);
	Dbl_swap_lower(leftp2,rightp2);
	result_exponent = Dbl_exponent(leftp1);
	Dbl_invert_sign(leftp1);
	}

    /* Invariant: left is not smaller than right. */

    if((right_exponent = Dbl_exponent(rightp1)) == 0)
	{
	/* Denormalized operands. First look for zeroes */
	if(Dbl_iszero_mantissa(rightp1,rightp2))
	    {
	    /* right is zero */
	    if(Dbl_iszero_exponentmantissa(leftp1,leftp2))
		{
		/* Both operands are zeros */
		Dbl_invert_sign(rightp1);
		if(Is_rounding_mode(ROUNDMINUS))
		    {
		    Dbl_or_signs(leftp1,/*with*/rightp1);
		    }
		else
		    {
		    Dbl_and_signs(leftp1,/*with*/rightp1);
		    }
		}
	    else
		{
		/* Left is not a zero and must be the result.  Trapped
		 * underflows are signaled if left is denormalized.  Result
		 * is always exact. */
		if( (result_exponent == 0) && Is_underflowtrap_enabled() )
		    {
		    /* need to normalize results mantissa */
		    sign_save = Dbl_signextendedsign(leftp1);
		    Dbl_leftshiftby1(leftp1,leftp2);
		    Dbl_normalize(leftp1,leftp2,result_exponent);
		    Dbl_set_sign(leftp1,/*using*/sign_save);
		    Dbl_setwrapped_exponent(leftp1,result_exponent,unfl);
		    Dbl_copytoptr(leftp1,leftp2,dstptr);
		    /* inexact = FALSE */
		    return(UNDERFLOWEXCEPTION);
		    }
		}
	    Dbl_copytoptr(leftp1,leftp2,dstptr);
	    return(NOEXCEPTION);
	    }

	/* Neither are zeroes */
	Dbl_clear_sign(rightp1);	/* Exponent is already cleared */
	if(result_exponent == 0 )
	    {
	    /* Both operands are denormalized.  The result must be exact
	     * and is simply calculated.  A sum could become normalized and a
	     * difference could cancel to a true zero. */
	    if( (/*signed*/int) save >= 0 )
		{
		Dbl_subtract(leftp1,leftp2,/*minus*/rightp1,rightp2,
		 /*into*/resultp1,resultp2);
		if(Dbl_iszero_mantissa(resultp1,resultp2))
		    {
		    if(Is_rounding_mode(ROUNDMINUS))
			{
			Dbl_setone_sign(resultp1);
			}
		    else
			{
			Dbl_setzero_sign(resultp1);
			}
		    Dbl_copytoptr(resultp1,resultp2,dstptr);
		    return(NOEXCEPTION);
		    }
		}
	    else
		{
		Dbl_addition(leftp1,leftp2,rightp1,rightp2,
		 /*into*/resultp1,resultp2);
		if(Dbl_isone_hidden(resultp1))
		    {
		    Dbl_copytoptr(resultp1,resultp2,dstptr);
		    return(NOEXCEPTION);
		    }
		}
	    if(Is_underflowtrap_enabled())
		{
		/* need to normalize result */
		sign_save = Dbl_signextendedsign(resultp1);
		Dbl_leftshiftby1(resultp1,resultp2);
		Dbl_normalize(resultp1,resultp2,result_exponent);
		Dbl_set_sign(resultp1,/*using*/sign_save);
		Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
		Dbl_copytoptr(resultp1,resultp2,dstptr);
		/* inexact = FALSE */
		return(UNDERFLOWEXCEPTION);
		}
	    Dbl_copytoptr(resultp1,resultp2,dstptr);
	    return(NOEXCEPTION);
	    }
	right_exponent = 1;	/* Set exponent to reflect different bias
				 * with denormalized numbers. */
	}
    else
	{
	Dbl_clear_signexponent_set_hidden(rightp1);
	}
    Dbl_clear_exponent_set_hidden(leftp1);
    diff_exponent = result_exponent - right_exponent;

    /*
     * Special case alignment of operands that would force alignment
     * beyond the extent of the extension.  A further optimization
     * could special case this but only reduces the path length for this
     * infrequent case.
     */
    if(diff_exponent > DBL_THRESHOLD)
	{
	diff_exponent = DBL_THRESHOLD;
	}

    /* Align right operand by shifting to right */
    Dbl_right_align(/*operand*/rightp1,rightp2,/*shifted by*/diff_exponent,
     /*and lower to*/extent);

    /* Treat sum and difference of the operands separately. */
    if( (/*signed*/int) save >= 0 )
	{
	/*
	 * Difference of the two operands.  There can be no overflow.  A
	 * borrow can occur out of the hidden bit and force a post
	 * normalization phase.
	 */
	Dbl_subtract_withextension(leftp1,leftp2,/*minus*/rightp1,rightp2,
	 /*with*/extent,/*into*/resultp1,resultp2);
	if(Dbl_iszero_hidden(resultp1))
	    {
	    /* Handle normalization */
	    /* A straightforward algorithm would now shift the result
	     * and extension left until the hidden bit becomes one.  Not
	     * all of the extension bits need participate in the shift.
	     * Only the two most significant bits (round and guard) are
	     * needed.  If only a single shift is needed then the guard
	     * bit becomes a significant low order bit and the extension
	     * must participate in the rounding.  If more than a single
	     * shift is needed, then all bits to the right of the guard
	     * bit are zeros, and the guard bit may or may not be zero. */
	    sign_save = Dbl_signextendedsign(resultp1);
	    Dbl_leftshiftby1_withextent(resultp1,resultp2,extent,resultp1,resultp2);

	    /* Need to check for a zero result.  The sign and exponent
	     * fields have already been zeroed.  The more efficient test
	     * of the full object can be used.
	     */
	    if(Dbl_iszero(resultp1,resultp2))
		/* Must have been "x-x" or "x+(-x)". */
		{
		if(Is_rounding_mode(ROUNDMINUS)) Dbl_setone_sign(resultp1);
		Dbl_copytoptr(resultp1,resultp2,dstptr);
		return(NOEXCEPTION);
		}
	    result_exponent--;

	    /* Look to see if normalization is finished. */
	    if(Dbl_isone_hidden(resultp1))
		{
		if(result_exponent==0)
		    {
		    /* Denormalized, exponent should be zero.  Left operand *
		     * was normalized, so extent (guard, round) was zero    */
		    goto underflow;
		    }
		else
		    {
		    /* No further normalization is needed. */
		    Dbl_set_sign(resultp1,/*using*/sign_save);
		    Ext_leftshiftby1(extent);
		    goto round;
		    }
		}

	    /* Check for denormalized, exponent should be zero.  Left    *
	     * operand was normalized, so extent (guard, round) was zero */
	    if(!(underflowtrap = Is_underflowtrap_enabled()) &&
	       result_exponent==0) goto underflow;

	    /* Shift extension to complete one bit of normalization and
	     * update exponent. */
	    Ext_leftshiftby1(extent);

	    /* Discover first one bit to determine shift amount.  Use a
	     * modified binary search.  We have already shifted the result
	     * one position right and still not found a one so the remainder
	     * of the extension must be zero and simplifies rounding. */
	    /* Scan bytes */
	    while(Dbl_iszero_hiddenhigh7mantissa(resultp1))
		{
		Dbl_leftshiftby8(resultp1,resultp2);
		if((result_exponent -= 8) <= 0  && !underflowtrap)
		    goto underflow;
		}
	    /* Now narrow it down to the nibble */
	    if(Dbl_iszero_hiddenhigh3mantissa(resultp1))
		{
		/* The lower nibble contains the normalizing one */
		Dbl_leftshiftby4(resultp1,resultp2);
		if((result_exponent -= 4) <= 0 && !underflowtrap)
		    goto underflow;
		}
	    /* Select case where first bit is set (already normalized)
	     * otherwise select the proper shift. */
	    if((jumpsize = Dbl_hiddenhigh3mantissa(resultp1)) > 7)
		{
		/* Already normalized */
		if(result_exponent <= 0) goto underflow;
		Dbl_set_sign(resultp1,/*using*/sign_save);
		Dbl_set_exponent(resultp1,/*using*/result_exponent);
		Dbl_copytoptr(resultp1,resultp2,dstptr);
		return(NOEXCEPTION);
		}
	    Dbl_sethigh4bits(resultp1,/*using*/sign_save);
	    switch(jumpsize)
		{
		case 1:
		    {
		    Dbl_leftshiftby3(resultp1,resultp2);
		    result_exponent -= 3;
		    break;
		    }
		case 2:
		case 3:
		    {
		    Dbl_leftshiftby2(resultp1,resultp2);
		    result_exponent -= 2;
		    break;
		    }
		case 4:
		case 5:
		case 6:
		case 7:
		    {
		    Dbl_leftshiftby1(resultp1,resultp2);
		    result_exponent -= 1;
		    break;
		    }
		}
	    if(result_exponent > 0)
		{
		Dbl_set_exponent(resultp1,/*using*/result_exponent);
		Dbl_copytoptr(resultp1,resultp2,dstptr);
		return(NOEXCEPTION); 	/* Sign bit is already set */
		}
	    /* Fixup potential underflows */
	  underflow:
	    if(Is_underflowtrap_enabled())
		{
		Dbl_set_sign(resultp1,sign_save);
		Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
		Dbl_copytoptr(resultp1,resultp2,dstptr);
		/* inexact = FALSE */
		return(UNDERFLOWEXCEPTION);
		}
	    /*
	     * Since we cannot get an inexact denormalized result,
	     * we can now return.
	     */
	    Dbl_fix_overshift(resultp1,resultp2,(1-result_exponent),extent);
	    Dbl_clear_signexponent(resultp1);
	    Dbl_set_sign(resultp1,sign_save);
	    Dbl_copytoptr(resultp1,resultp2,dstptr);
	    return(NOEXCEPTION);
	    } /* end if(hidden...)... */
	/* Fall through and round */
	} /* end if(save >= 0)... */
    else
	{
	/* Add magnitudes (signs of the operands differ) */
	Dbl_addition(leftp1,leftp2,rightp1,rightp2,/*to*/resultp1,resultp2);
	if(Dbl_isone_hiddenoverflow(resultp1))
	    {
	    /* Prenormalization required. */
	    Dbl_rightshiftby1_withextent(resultp2,extent,extent);
	    Dbl_arithrightshiftby1(resultp1,resultp2);
	    result_exponent++;
	    } /* end if hiddenoverflow... */
	} /* end else ...subtract magnitudes... */

    /* Round the result.  If the extension is all zeros, then the result is
     * exact.  Otherwise round in the correct direction.  No underflow is
     * possible. If a postnormalization is necessary, then the mantissa is
     * all zeros so no shift is needed. */
  round:
    if(Ext_isnotzero(extent))
	{
	inexact = TRUE;
	switch(Rounding_mode())
	    {
	    case ROUNDNEAREST: /* The default. */
		if(Ext_isone_sign(extent))
		    {
		    /* at least 1/2 ulp */
		    if(Ext_isnotzero_lower(extent)  ||
		      Dbl_isone_lowmantissap2(resultp2))
			{
			/* either exactly half way and odd or more than 1/2ulp */
			Dbl_increment(resultp1,resultp2);
			}
		    }
		break;

	    case ROUNDPLUS:
		if(Dbl_iszero_sign(resultp1))
		    {
		    /* Round up positive results */
		    Dbl_increment(resultp1,resultp2);
		    }
		break;

	    case ROUNDMINUS:
		if(Dbl_isone_sign(resultp1))
		    {
		    /* Round down negative results */
		    Dbl_increment(resultp1,resultp2);
		    }
		/* intentional fall through: ROUNDZERO truncates (no-op) */

	    case ROUNDZERO:;
		/* truncate is simple */
	    } /* end switch... */
	if(Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
	}
    if(result_exponent == DBL_INFINITY_EXPONENT)
	{
	/* Overflow */
	if(Is_overflowtrap_enabled())
	    {
	    Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
	    Dbl_copytoptr(resultp1,resultp2,dstptr);
	    if (inexact)
		if (Is_inexacttrap_enabled())
		    return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
		else Set_inexactflag();
	    return(OVERFLOWEXCEPTION);
	    }
	else
	    {
	    inexact = TRUE;
	    Set_overflowflag();
	    Dbl_setoverflow(resultp1,resultp2);
	    }
	}
    else Dbl_set_exponent(resultp1,result_exponent);
    Dbl_copytoptr(resultp1,resultp2,dstptr);
    if(inexact)
	if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
	else Set_inexactflag();
    return(NOEXCEPTION);
    }
| gpl-2.0 |
allenbh/ntrdma | drivers/gpu/drm/gma500/mmu.c | 1944 | 18745 | /**************************************************************************
* Copyright (c) 2007, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
**************************************************************************/
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"
#include "mmu.h"
/*
* Code for the SGX MMU:
*/
/*
* clflush on one processor only:
* clflush should apparently flush the cache line on all processors in an
* SMP system.
*/
/*
* kmap atomic:
* The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
* called from within the locked region.
* Since the slots are per processor, this will guarantee that we are the only
* user.
*/
/*
* TODO: Inserting ptes from an interrupt handler:
* This may be desirable for some SGX functionality where the GPU can fault in
* needed pages. For that, we need to make an atomic insert_pages function, that
* may fail.
* If it fails, the caller need to insert the page using a workqueue function,
* but on average it should be fast.
*/
/* Index of the PTE within its page table: bits above PSB_PTE_SHIFT,
 * masked to one of the 1024 entries per table page. */
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
	uint32_t idx = offset >> PSB_PTE_SHIFT;

	return idx & 0x3FF;
}
/* Index of the PDE within the page directory for a given GPU offset. */
static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
	uint32_t idx = offset >> PSB_PDE_SHIFT;

	return idx;
}
#if defined(CONFIG_X86)
/* Flush the cache line containing @addr; per the note at the top of this
 * file, clflush is expected to be coherent across all CPUs. */
static inline void psb_clflush(void *addr)
{
	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

/* Fence-bracketed cache-line flush, skipped when the CPU lacks clflush. */
static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{
	if (!driver->has_clflush)
		return;

	mb();
	psb_clflush(addr);
	mb();
}
#else
/* No clflush instruction available on this architecture: no-op. */
static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
{;
}
#endif
/*
 * Invalidate the SGX MMU directory cache if a TLB flush is pending (or
 * @force is set), and signal the MSVDX MMU to invalidate as well.  Caller
 * holds driver->sem in write mode (see the callers in this file).
 */
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (atomic_read(&driver->needs_tlbflush) || force) {
		uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
		/* Pulse the INVALDC bit: set, fence, clear. */
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);

		/* Make sure data cache is turned off before enabling it */
		wmb();
		PSB_WSGX32(val & ~_PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
		/* Read back to post the register writes. */
		(void)PSB_RSGX32(PSB_CR_BIF_CTRL);
		if (driver->msvdx_mmu_invaldc)
			atomic_set(driver->msvdx_mmu_invaldc, 1);
	}
	atomic_set(&driver->needs_tlbflush, 0);
}
#if 0
/* Currently compiled out: locking wrapper that takes the driver semaphore
 * around psb_mmu_flush_pd_locked(). */
static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
	down_write(&driver->sem);
	psb_mmu_flush_pd_locked(driver, force);
	up_write(&driver->sem);
}
#endif
/*
 * Flush the SGX MMU: invalidate the directory cache when a TLB flush is
 * pending, otherwise just flush.  Takes driver->sem in write mode and also
 * signals the MSVDX MMU to invalidate.
 */
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t val;

	down_write(&driver->sem);
	val = PSB_RSGX32(PSB_CR_BIF_CTRL);
	if (atomic_read(&driver->needs_tlbflush))
		PSB_WSGX32(val | _PSB_CB_CTRL_INVALDC, PSB_CR_BIF_CTRL);
	else
		PSB_WSGX32(val | _PSB_CB_CTRL_FLUSH, PSB_CR_BIF_CTRL);

	/* Make sure data cache is turned off and MMU is flushed before
	   restoring bank interface control register */
	wmb();
	PSB_WSGX32(val & ~(_PSB_CB_CTRL_FLUSH | _PSB_CB_CTRL_INVALDC),
		   PSB_CR_BIF_CTRL);
	/* Posting read to make sure the writes reached the hardware. */
	(void)PSB_RSGX32(PSB_CR_BIF_CTRL);

	atomic_set(&driver->needs_tlbflush, 0);

	if (driver->msvdx_mmu_invaldc)
		atomic_set(driver->msvdx_mmu_invaldc, 1);

	up_write(&driver->sem);
}
/*
 * Bind page directory @pd to hardware context @hw_context by programming
 * its page frame address into the per-context directory list base
 * register, then flush the directory cache.
 * NOTE(review): for hw_context >= 1 the offset is BASE1 + hw_context * 4,
 * i.e. context 1 maps to BASE1 + 4 — confirm against the SGX register map.
 */
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
	struct drm_device *dev = pd->driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
			  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;

	down_write(&pd->driver->sem);
	PSB_WSGX32(page_to_pfn(pd->p) << PAGE_SHIFT, offset);
	wmb();
	psb_mmu_flush_pd_locked(pd->driver, 1);
	pd->hw_context = hw_context;
	up_write(&pd->driver->sem);
}
/* Return the end of the PDE-covered range that @addr falls into, clamped
 * to @end: the next PDE boundary above @addr, or @end if that comes first. */
static inline unsigned long psb_pd_addr_end(unsigned long addr,
					    unsigned long end)
{
	unsigned long next = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;

	if (next < end)
		return next;
	return end;
}
/* Build a PTE for page frame @pfn with the access bits implied by the
 * PSB_MMU_*_MEMORY flags in @type; the valid bit is always set. */
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
	uint32_t pte = (pfn << PAGE_SHIFT) | PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		pte |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		pte |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		pte |= PSB_PTE_WO;

	return pte;
}
/*
 * Allocate a page directory plus its dummy PT and dummy page.  When
 * @trap_pagefaults is clear, invalid PDEs/PTEs point at the dummy
 * pages (typed by @invalid_type) so stray accesses hit harmless memory;
 * otherwise they are left zero so accesses fault.  Returns NULL on any
 * allocation failure (all partial allocations released).
 */
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
				    int trap_pagefaults, int invalid_type)
{
	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	uint32_t *v;
	int i;

	if (!pd)
		return NULL;

	pd->p = alloc_page(GFP_DMA32);
	if (!pd->p)
		goto out_err1;
	pd->dummy_pt = alloc_page(GFP_DMA32);
	if (!pd->dummy_pt)
		goto out_err2;
	pd->dummy_page = alloc_page(GFP_DMA32);
	if (!pd->dummy_page)
		goto out_err3;

	if (!trap_pagefaults) {
		pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
						   invalid_type);
		pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
						   invalid_type);
	} else {
		pd->invalid_pde = 0;
		pd->invalid_pte = 0;
	}

	/* Fill the dummy PT with invalid PTEs ... */
	v = kmap(pd->dummy_pt);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pte;

	kunmap(pd->dummy_pt);

	/* ... and the directory page with invalid PDEs. */
	v = kmap(pd->p);
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		v[i] = pd->invalid_pde;

	kunmap(pd->p);

	clear_page(kmap(pd->dummy_page));
	kunmap(pd->dummy_page);

	/* Shadow array of software PT structs; vmalloc_user() zeroes it. */
	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
	if (!pd->tables)
		goto out_err4;

	pd->hw_context = -1;	/* not bound to a hardware context yet */
	pd->pd_mask = PSB_PTE_VALID;
	pd->driver = driver;

	return pd;

out_err4:
	__free_page(pd->dummy_page);
out_err3:
	__free_page(pd->dummy_pt);
out_err2:
	__free_page(pd->p);
out_err1:
	kfree(pd);
	return NULL;
}
/* Release a page table: its backing page and the software struct. */
static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
	__free_page(pt->p);
	kfree(pt);
}
/*
 * Free a page directory and every page table hanging off it.  If the PD
 * is bound to a hardware context, its directory base register is cleared
 * and the directory cache flushed first.
 */
void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
	struct psb_mmu_driver *driver = pd->driver;
	struct drm_device *dev = driver->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct psb_mmu_pt *pt;
	int i;

	down_write(&driver->sem);
	if (pd->hw_context != -1) {
		PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4);
		psb_mmu_flush_pd_locked(driver, 1);
	}

	/* Should take the spinlock here, but we don't need to do that
	   since we have the semaphore in write mode. */

	for (i = 0; i < 1024; ++i) {
		pt = pd->tables[i];
		if (pt)
			psb_mmu_free_pt(pt);
	}

	vfree(pd->tables);
	__free_page(pd->dummy_page);
	__free_page(pd->dummy_pt);
	__free_page(pd->p);
	kfree(pd);
	up_write(&driver->sem);
}
/*
 * Allocate a fresh page table for @pd, with every PTE set to the PD's
 * invalid PTE.  When the PD is live on hardware and the CPU has clflush,
 * the newly written table page is flushed line by line under the driver
 * spinlock.  Returns NULL on allocation failure.
 */
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
	void *v;
	/* clflush_add is the cache-line stride in bytes (stored
	 * pre-shifted by PAGE_SHIFT in the driver struct). */
	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
	uint32_t clflush_count = PAGE_SIZE / clflush_add;
	spinlock_t *lock = &pd->driver->lock;
	uint8_t *clf;
	uint32_t *ptes;
	int i;

	if (!pt)
		return NULL;

	pt->p = alloc_page(GFP_DMA32);
	if (!pt->p) {
		kfree(pt);
		return NULL;
	}

	spin_lock(lock);

	v = kmap_atomic(pt->p);
	clf = (uint8_t *) v;
	ptes = (uint32_t *) v;
	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
		*ptes++ = pd->invalid_pte;

#if defined(CONFIG_X86)
	if (pd->driver->has_clflush && pd->hw_context != -1) {
		mb();
		for (i = 0; i < clflush_count; ++i) {
			psb_clflush(clf);
			clf += clflush_add;
		}
		mb();
	}
#endif
	kunmap_atomic(v);
	spin_unlock(lock);

	pt->count = 0;		/* number of valid PTEs in this table */
	pt->pd = pd;
	pt->index = 0;

	return pt;
}
/*
 * Look up — allocating on demand — the page table covering @addr, and
 * return it kmapped (pt->v) with the driver spinlock HELD.  The caller
 * must release both via psb_mmu_pt_unmap_unlock().  Returns NULL (lock
 * not held) if a needed allocation fails.
 */
struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
					     unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	uint32_t *v;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	while (!pt) {
		/* Allocation sleeps, so drop the lock; re-check afterwards
		 * in case another thread installed a table meanwhile. */
		spin_unlock(lock);
		pt = psb_mmu_alloc_pt(pd);
		if (!pt)
			return NULL;
		spin_lock(lock);

		if (pd->tables[index]) {
			/* Lost the race: free ours, use the winner's. */
			spin_unlock(lock);
			psb_mmu_free_pt(pt);
			spin_lock(lock);
			pt = pd->tables[index];
			continue;
		}

		/* Install the new table and hook its PDE into the
		 * directory page. */
		v = kmap_atomic(pd->p);
		pd->tables[index] = pt;
		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
		pt->index = index;
		kunmap_atomic((void *) v);

		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}
/*
 * Like psb_mmu_pt_alloc_map_lock() but without allocation: returns the
 * existing page table for @addr kmapped with the driver spinlock held,
 * or NULL (lock released) if no table covers @addr.
 */
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
					      unsigned long addr)
{
	uint32_t index = psb_mmu_pd_index(addr);
	struct psb_mmu_pt *pt;
	spinlock_t *lock = &pd->driver->lock;

	spin_lock(lock);
	pt = pd->tables[index];
	if (!pt) {
		spin_unlock(lock);
		return NULL;
	}
	pt->v = kmap_atomic(pt->p);
	return pt;
}
/*
 * Counterpart of psb_mmu_pt_{alloc_,}map_lock(): unmap the page table
 * and drop the driver lock. If the table no longer holds any valid PTE
 * (pt->count == 0) it is unhooked from the page directory and freed.
 */
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
	struct psb_mmu_pd *pd = pt->pd;
	uint32_t *v;

	/* Release the page-table mapping taken by the map_lock helpers */
	kunmap_atomic(pt->v);
	if (pt->count == 0) {
		/* Last PTE gone: point the PDE at the invalid entry and
		 * drop the table from the directory. */
		v = kmap_atomic(pd->p);
		v[pt->index] = pd->invalid_pde;
		pd->tables[pt->index] = NULL;
		if (pd->hw_context != -1) {
			psb_mmu_clflush(pd->driver, (void *)&v[pt->index]);
			atomic_set(&pd->driver->needs_tlbflush, 1);
		}
		/*
		 * Bug fix: this used to call kunmap_atomic(pt->v), i.e. the
		 * page-table mapping that was already released at the top of
		 * this function, instead of the directory mapping @v taken
		 * just above — an unbalanced atomic-kmap pairing.
		 */
		kunmap_atomic(v);
		spin_unlock(&pd->driver->lock);
		psb_mmu_free_pt(pt);
		return;
	}
	spin_unlock(&pd->driver->lock);
}
/* Store @pte into the page-table slot that translates @addr. The table
 * must currently be mapped (pt->v valid, driver lock held). */
static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr,
				   uint32_t pte)
{
	uint32_t slot = psb_mmu_pt_index(addr);

	pt->v[slot] = pte;
}
static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
unsigned long addr)
{
pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
/* Fetch the driver's default page directory under the driver rwsem. */
struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *default_pd;

	down_read(&driver->sem);
	default_pd = driver->default_pd;
	up_read(&driver->sem);

	return default_pd;
}
/* Returns the physical address of the PD shared by sgx/msvdx */
uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
{
	struct psb_mmu_pd *default_pd = psb_mmu_get_default_pd(driver);

	/* Turn the directory page's pfn into a physical byte address */
	return page_to_pfn(default_pd->p) << PAGE_SHIFT;
}
/*
 * Tear down the MMU driver: restore the BIF control register saved at
 * init time, then free the default page directory and the driver itself.
 */
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
	struct drm_device *dev = driver->dev;
	/* dev_priv looks unused here but is presumably referenced from
	 * inside the PSB_WSGX32() macro — confirm before removing. */
	struct drm_psb_private *dev_priv = dev->dev_private;
	PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
	psb_mmu_free_pagedir(driver->default_pd);
	kfree(driver);
}
/*
 * Allocate and initialize the MMU driver object: create the default page
 * directory, save and pulse the BIF fault-clear bit, and probe the CPU's
 * clflush capability (x86 only). Returns NULL on allocation failure.
 */
struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
					   int trap_pagefaults,
					   int invalid_type,
					   atomic_t *msvdx_mmu_invaldc)
{
	struct psb_mmu_driver *driver;
	/* dev_priv is presumably used implicitly by the PSB_RSGX32/
	 * PSB_WSGX32 macros below — confirm. */
	struct drm_psb_private *dev_priv = dev->dev_private;
	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return NULL;
	driver->dev = dev;
	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
					      invalid_type);
	if (!driver->default_pd)
		goto out_err1;
	spin_lock_init(&driver->lock);
	init_rwsem(&driver->sem);
	/* Hold the sem for write until setup is fully done */
	down_write(&driver->sem);
	atomic_set(&driver->needs_tlbflush, 1);
	driver->msvdx_mmu_invaldc = msvdx_mmu_invaldc;
	/* Save BIF control for restore at takedown, then pulse the
	 * CLEAR_FAULT bit to reset any latched fault state. */
	driver->bif_ctrl = PSB_RSGX32(PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl | _PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	PSB_WSGX32(driver->bif_ctrl & ~_PSB_CB_CTRL_CLEAR_FAULT,
		   PSB_CR_BIF_CTRL);
	driver->has_clflush = 0;
#if defined(CONFIG_X86)
	if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
		uint32_t tfms, misc, cap0, cap4, clflush_size;
		/*
		 * clflush size is determined at kernel setup for x86_64 but not
		 * for i386. We have to do it here.
		 */
		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
		/* CPUID.1:EBX[15:8] = cache-line size in 8-byte units */
		clflush_size = ((misc >> 8) & 0xff) * 8;
		driver->has_clflush = 1;
		driver->clflush_add =
		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
		driver->clflush_mask = driver->clflush_add - 1;
		driver->clflush_mask = ~driver->clflush_mask;
	}
#endif
	up_write(&driver->sem);
	return driver;
out_err1:
	kfree(driver);
	return NULL;
}
#if defined(CONFIG_X86)
/*
 * Cache-flush the PTE slots backing a (possibly tiled) range of
 * num_pages pages starting at @address, so the GPU observes CPU-written
 * entries. With hw_tile_stride != 0 the range is walked as `rows` strips
 * of desired_tile_stride pages, each row hw_tile_stride pages apart.
 * No-op unless the CPU supports clflush.
 */
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
		uint32_t num_pages, uint32_t desired_tile_stride,
		uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long clflush_add = pd->driver->clflush_add;
	unsigned long clflush_mask = pd->driver->clflush_mask;
	if (!pd->driver->has_clflush)
		return;
	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;
	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	mb();
	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;
		do {
			/* Walk one page directory entry at a time */
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			/* Flush one cache line per step across this PT */
			do {
				psb_clflush(&pt->v[psb_mmu_pt_index(addr)]);
			} while (addr += clflush_add,
				 (addr & clflush_mask) < next);
			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	mb();
}
#else
/* Non-x86 fallback: flush the whole TTM cache instead of per-PTE lines */
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address,
		uint32_t num_pages, uint32_t desired_tile_stride,
		uint32_t hw_tile_stride)
{
	drm_ttm_cache_flush();
}
#endif
/*
 * Invalidate the PTEs for num_pages consecutive pages starting at
 * @address, then flush caches/TLB if the directory is bound to a
 * hardware context. Counterpart of psb_mmu_insert_pfn_sequence().
 */
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
		unsigned long address, uint32_t num_pages)
{
	struct psb_mmu_pt *pt;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;	/* kept for the final flush */
	down_read(&pd->driver->sem);
	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);
	do {
		/* Process one page-directory entry's worth at a time.
		 * NOTE(review): this uses the *alloc* variant while
		 * removing mappings, so a missing page table gets
		 * allocated just to be invalidated — confirm intended. */
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt)
			goto out;
		do {
			psb_mmu_invalidate_pte(pt, addr);
			--pt->count;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);
	} while (addr = next, next != end);
out:
	/* Flush even on early exit so hardware never sees stale PTEs */
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
	up_read(&pd->driver->sem);
	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
	return;
}
/*
 * Invalidate the PTEs for a (possibly tiled) range: `rows` strips of
 * desired_tile_stride pages spaced hw_tile_stride pages apart, starting
 * at @address. With hw_tile_stride == 0 the whole num_pages range is
 * one contiguous strip. Flushes caches/TLB when bound to a HW context.
 */
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
		uint32_t num_pages, uint32_t desired_tile_stride,
		uint32_t hw_tile_stride)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;	/* kept for the final flush */
	if (hw_tile_stride)
		rows = num_pages / desired_tile_stride;
	else
		desired_tile_stride = num_pages;
	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	down_read(&pd->driver->sem);
	/* Make sure we only need to flush this processor's cache */
	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;
		do {
			/* One page-directory entry's span per iteration;
			 * silently skip spans with no page table. */
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_map_lock(pd, addr);
			if (!pt)
				continue;
			do {
				psb_mmu_invalidate_pte(pt, addr);
				--pt->count;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);
	up_read(&pd->driver->sem);
	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
}
/*
 * Map num_pages consecutive pfns (starting at start_pfn) into @pd at
 * virtual address @address, with PTE attribute @type.
 *
 * Returns 0 on success or -ENOMEM when a page table cannot be
 * allocated. On failure, PTEs written so far remain in place, but
 * caches/TLB are still flushed so hardware state stays consistent.
 */
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
				unsigned long address, uint32_t num_pages,
				int type)
{
	struct psb_mmu_pt *pt;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long f_address = address;	/* kept for the final flush */
	int ret = -ENOMEM;
	down_read(&pd->driver->sem);
	addr = address;
	end = addr + (num_pages << PAGE_SHIFT);
	do {
		/* One page-directory entry's span per iteration */
		next = psb_pd_addr_end(addr, end);
		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
		if (!pt) {
			ret = -ENOMEM;
			goto out;
		}
		do {
			pte = psb_mmu_mask_pte(start_pfn++, type);
			psb_mmu_set_pte(pt, addr, pte);
			pt->count++;
		} while (addr += PAGE_SIZE, addr < next);
		psb_mmu_pt_unmap_unlock(pt);
	} while (addr = next, next != end);
	ret = 0;
out:
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
	up_read(&pd->driver->sem);
	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
	/*
	 * Bug fix: this used to "return 0" unconditionally, reporting
	 * success even when page-table allocation failed and we bailed
	 * out through the error path above. Propagate the real status.
	 */
	return ret;
}
/*
 * Map an array of struct page pointers into @pd starting at @address,
 * optionally tiled: `rows` strips of desired_tile_stride pages spaced
 * hw_tile_stride pages apart. Returns 0 on success, -EINVAL if the page
 * count does not tile evenly, or -ENOMEM if a page table cannot be
 * allocated (pages mapped so far remain mapped).
 */
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
		unsigned long address, uint32_t num_pages,
		uint32_t desired_tile_stride, uint32_t hw_tile_stride,
		int type)
{
	struct psb_mmu_pt *pt;
	uint32_t rows = 1;
	uint32_t i;
	uint32_t pte;
	unsigned long addr;
	unsigned long end;
	unsigned long next;
	unsigned long add;
	unsigned long row_add;
	unsigned long f_address = address;	/* kept for the final flush */
	int ret = -ENOMEM;
	if (hw_tile_stride) {
		if (num_pages % desired_tile_stride != 0)
			return -EINVAL;
		rows = num_pages / desired_tile_stride;
	} else {
		desired_tile_stride = num_pages;
	}
	add = desired_tile_stride << PAGE_SHIFT;
	row_add = hw_tile_stride << PAGE_SHIFT;
	down_read(&pd->driver->sem);
	for (i = 0; i < rows; ++i) {
		addr = address;
		end = addr + add;
		do {
			/* One page-directory entry's span per iteration */
			next = psb_pd_addr_end(addr, end);
			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
			if (!pt)
				goto out;
			do {
				pte = psb_mmu_mask_pte(page_to_pfn(*pages++),
						       type);
				psb_mmu_set_pte(pt, addr, pte);
				pt->count++;
			} while (addr += PAGE_SIZE, addr < next);
			psb_mmu_pt_unmap_unlock(pt);
		} while (addr = next, next != end);
		address += row_add;
	}
	ret = 0;
out:
	/* Flush even on failure so hardware never sees stale cache lines */
	if (pd->hw_context != -1)
		psb_mmu_flush_ptes(pd, f_address, num_pages,
				   desired_tile_stride, hw_tile_stride);
	up_read(&pd->driver->sem);
	if (pd->hw_context != -1)
		psb_mmu_flush(pd->driver);
	return ret;
}
/*
 * Translate GPU virtual address @virtual through @pd, storing the pfn in
 * *pfn. Returns 0 on success or -EINVAL if no valid mapping exists. If
 * the page table is absent but the directory's "invalid" PTE is itself
 * marked valid (a directed fallback mapping), that pfn is returned.
 */
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
			   unsigned long *pfn)
{
	int ret;
	struct psb_mmu_pt *pt;
	uint32_t tmp;
	spinlock_t *lock = &pd->driver->lock;
	down_read(&pd->driver->sem);
	pt = psb_mmu_pt_map_lock(pd, virtual);
	if (!pt) {
		uint32_t *v;
		/* No page table: inspect the page-directory entry itself */
		spin_lock(lock);
		v = kmap_atomic(pd->p);
		tmp = v[psb_mmu_pd_index(virtual)];
		kunmap_atomic(v);
		spin_unlock(lock);
		/* NOTE(review): `tmp != pd->invalid_pde` in this OR-chain
		 * makes any non-invalid PDE (without a cached pt) fail —
		 * confirm the intended condition. */
		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
		    !(pd->invalid_pte & PSB_PTE_VALID)) {
			ret = -EINVAL;
			goto out;
		}
		ret = 0;
		*pfn = pd->invalid_pte >> PAGE_SHIFT;
		goto out;
	}
	tmp = pt->v[psb_mmu_pt_index(virtual)];
	if (!(tmp & PSB_PTE_VALID)) {
		ret = -EINVAL;
	} else {
		ret = 0;
		*pfn = tmp >> PAGE_SHIFT;
	}
	psb_mmu_pt_unmap_unlock(pt);
out:
	up_read(&pd->driver->sem);
	return ret;
}
| gpl-2.0 |
xenon-cm/android_kernel_samsung_tuna | drivers/media/dvb/mantis/mantis_uart.c | 2456 | 4363 | /*
Mantis PCI bridge driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "mantis_common.h"
#include "mantis_reg.h"
#include "mantis_uart.h"
/* UART configuration resolved from the board config at init time. */
struct mantis_uart_params {
	enum mantis_baud baud_rate;
	enum mantis_parity parity;
};

/* Printable baud-rate names, indexed by enum mantis_baud. */
static struct {
	char string[7];
} rates[5] = {
	{ "9600" },
	{ "19200" },
	{ "38400" },
	{ "57600" },
	{ "115200" }
};

/* Printable parity names, indexed by enum mantis_parity. */
static struct {
	char string[5];
} parity[3] = {
	{ "NONE" },
	{ "ODD" },
	{ "EVEN" }
};

/* Size of the on-stack RX buffer used by the deferred work handler. */
#define UART_MAX_BUF 16
/*
 * Drain (config->bytes + 1) bytes from the UART RX FIFO into @data.
 * Returns 0 on success or -EINVAL on a framing/parity error.
 */
int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
{
	struct mantis_hwconfig *config = mantis->hwconfig;
	u32 stat = 0, i;
	/* get data */
	for (i = 0; i < (config->bytes + 1); i++) {
		stat = mmread(MANTIS_UART_STAT);
		if (stat & MANTIS_UART_RXFIFO_FULL) {
			dprintk(MANTIS_ERROR, 1, "RX Fifo FULL");
		}
		data[i] = mmread(MANTIS_UART_RXD) & 0x3f;
		dprintk(MANTIS_DEBUG, 1, "Reading ... <%02x>", data[i] & 0x3f);
		/*
		 * NOTE(review): data[i] was masked with 0x3f above, so bits
		 * 7 and 6 can never be set and these two error paths look
		 * unreachable. Presumably the flags should be tested on the
		 * raw RXD register value (or on stat) before masking —
		 * confirm against the Mantis UART register documentation.
		 */
		if (data[i] & (1 << 7)) {
			dprintk(MANTIS_ERROR, 1, "UART framing error");
			return -EINVAL;
		}
		if (data[i] & (1 << 6)) {
			dprintk(MANTIS_ERROR, 1, "UART parity error");
			return -EINVAL;
		}
	}
	return 0;
}
/*
 * Deferred RX handler: read the pending UART bytes and log them.
 * Scheduled from mantis_uart_init(). The return value of
 * mantis_uart_read() is ignored, so framing/parity errors only
 * surface in the log.
 */
static void mantis_uart_work(struct work_struct *work)
{
	struct mantis_pci *mantis = container_of(work, struct mantis_pci, uart_work);
	struct mantis_hwconfig *config = mantis->hwconfig;
	/* NOTE(review): assumes config->bytes + 1 <= 16 (UART_MAX_BUF);
	 * no explicit bound check here — confirm all board configs. */
	u8 buf[16];
	int i;
	mantis_uart_read(mantis, buf);
	for (i = 0; i < (config->bytes + 1); i++)
		dprintk(MANTIS_INFO, 1, "UART BUF:%d <%02x> ", i, buf[i]);
	dprintk(MANTIS_DEBUG, 0, "\n");
}
/*
 * Program the UART parity bits and baud-rate divisor from @params.
 * Returns 0 on success or -EINVAL for an unrecognized baud rate.
 */
static int mantis_uart_setup(struct mantis_pci *mantis,
			     struct mantis_uart_params *params)
{
	u32 reg;
	u32 divisor;

	/* Merge the two parity control bits into the control register */
	mmwrite((mmread(MANTIS_UART_CTL) | (params->parity & 0x3)), MANTIS_UART_CTL);

	reg = mmread(MANTIS_UART_BAUD);

	switch (params->baud_rate) {
	case MANTIS_BAUD_9600:
		divisor = 0xd8;
		break;
	case MANTIS_BAUD_19200:
		divisor = 0x6c;
		break;
	case MANTIS_BAUD_38400:
		divisor = 0x36;
		break;
	case MANTIS_BAUD_57600:
		divisor = 0x23;
		break;
	case MANTIS_BAUD_115200:
		divisor = 0x11;
		break;
	default:
		return -EINVAL;
	}

	/* OR the divisor into the existing baud register contents */
	mmwrite(reg | divisor, MANTIS_UART_BAUD);

	return 0;
}
/*
 * Bring up the Mantis UART: configure baud/parity from the board
 * config, set the RX byte count, flush the FIFO, enable the RX
 * interrupt and kick the deferred work handler once. Always returns 0.
 */
int mantis_uart_init(struct mantis_pci *mantis)
{
	struct mantis_hwconfig *config = mantis->hwconfig;
	struct mantis_uart_params params;
	/* default parity: */
	params.baud_rate = config->baud_rate;
	params.parity = config->parity;
	dprintk(MANTIS_INFO, 1, "Initializing UART @ %sbps parity:%s",
		rates[params.baud_rate].string,
		parity[params.parity].string);
	init_waitqueue_head(&mantis->uart_wq);
	spin_lock_init(&mantis->uart_lock);
	INIT_WORK(&mantis->uart_work, mantis_uart_work);
	/* disable interrupt */
	mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
	/* NOTE(review): mantis_uart_setup() can return -EINVAL for an
	 * unknown baud rate; the result is ignored here — confirm. */
	mantis_uart_setup(mantis, &params);
	/* default 1 byte */
	mmwrite((mmread(MANTIS_UART_BAUD) | (config->bytes << 8)), MANTIS_UART_BAUD);
	/* flush buffer */
	mmwrite((mmread(MANTIS_UART_CTL) | MANTIS_UART_RXFLUSH), MANTIS_UART_CTL);
	/* enable interrupt */
	mmwrite(mmread(MANTIS_INT_MASK) | 0x800, MANTIS_INT_MASK);
	mmwrite(mmread(MANTIS_UART_CTL) | MANTIS_UART_RXINT, MANTIS_UART_CTL);
	schedule_work(&mantis->uart_work);
	dprintk(MANTIS_DEBUG, 1, "UART successfully initialized");
	return 0;
}
EXPORT_SYMBOL_GPL(mantis_uart_init);
/*
 * Shut the UART down: mask its RX interrupt, then wait for any queued
 * work-handler run to finish before the device goes away.
 */
void mantis_uart_exit(struct mantis_pci *mantis)
{
	/* disable interrupt */
	mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
	flush_work_sync(&mantis->uart_work);
}
EXPORT_SYMBOL_GPL(mantis_uart_exit);
| gpl-2.0 |
iXss/android_kernel_samsung_tuna | net/rds/bind.c | 2712 | 5286 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/jhash.h>
#include "rds.h"
/* Bucket count for the bound-socket table; must stay a power of two
 * because hash_to_bucket() masks with (BIND_HASH_SIZE - 1). */
#define BIND_HASH_SIZE 1024
/* Hash table of bound RDS sockets keyed by (local addr, port).
 * Readers traverse it under RCU; writers serialize on rds_bind_lock. */
static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
static DEFINE_SPINLOCK(rds_bind_lock);
/* Hash the (address, port) pair into one of the bind-table buckets. */
static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
{
	u32 hash = jhash_2words((u32)addr, (u32)port, 0);

	return &bind_hash_table[hash & (BIND_HASH_SIZE - 1)];
}
/*
 * Look up the socket bound to (addr, port) under RCU. If no match is
 * found and @insert is non-NULL, bind @insert to that pair (taking a
 * socket reference and publishing it into the hash) and return NULL.
 * The caller must hold rds_bind_lock when passing @insert.
 */
static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
					struct rds_sock *insert)
{
	struct rds_sock *rs;
	struct hlist_node *node;
	struct hlist_head *head = hash_to_bucket(addr, port);
	u64 cmp;
	/* Pack addr:port into one u64 so a single compare matches both */
	u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
	rcu_read_lock();
	hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
		cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
		      be16_to_cpu(rs->rs_bound_port);
		if (cmp == needle) {
			rcu_read_unlock();
			return rs;
		}
	}
	rcu_read_unlock();
	if (insert) {
		/*
		 * make sure our addr and port are set before
		 * we are added to the list, other people
		 * in rcu will find us as soon as the
		 * hlist_add_head_rcu is done
		 */
		insert->rs_bound_addr = addr;
		insert->rs_bound_port = port;
		rds_sock_addref(insert);
		hlist_add_head_rcu(&insert->rs_bound_node, head);
	}
	return NULL;
}
/*
 * Return the rds_sock bound at the given local address.
 *
 * The rx path can race with rds_release. We notice if rds_release() has
 * marked this socket and don't return a rs ref to the rx path.
 */
struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
{
	struct rds_sock *rs;
	rs = rds_bind_lookup(addr, port, NULL);
	/* Only hand out a reference if the socket isn't being torn down.
	 * NOTE(review): the lookup itself returns rs without a reference;
	 * presumably the bind-table ref keeps it alive here — confirm. */
	if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
		rds_sock_addref(rs);
	else
		rs = NULL;
	rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
		ntohs(port));
	return rs;
}
/*
 * Bind @rs to (addr, *port). With *port == 0 a free port is searched
 * starting at a random value; otherwise only the requested port is
 * tried. Returns 0 on success (with *port set to the bound port) or
 * -EADDRINUSE if no port could be claimed.
 */
static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
{
	unsigned long flags;
	int ret = -EADDRINUSE;
	u16 rover, last;
	if (*port != 0) {
		/* Caller picked a port: try exactly that one */
		rover = be16_to_cpu(*port);
		last = rover;
	} else {
		/* Random start (>= 2), wrap through the whole port space */
		rover = max_t(u16, net_random(), 2);
		last = rover - 1;
	}
	spin_lock_irqsave(&rds_bind_lock, flags);
	do {
		if (rover == 0)
			rover++;	/* port 0 is never assigned */
		/* rds_bind_lookup() inserts rs on a free slot */
		if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) {
			*port = rs->rs_bound_port;
			ret = 0;
			rdsdebug("rs %p binding to %pI4:%d\n",
			  rs, &addr, (int)ntohs(*port));
			break;
		}
	} while (rover++ != last);
	spin_unlock_irqrestore(&rds_bind_lock, flags);
	return ret;
}
/*
 * Undo rds_add_bound(): unhook @rs from the bind hash (RCU-safe) and
 * drop the reference the table held. No-op if the socket is not bound.
 * Callers that need RCU readers to be done must synchronize_rcu()
 * afterwards (see rds_bind()).
 */
void rds_remove_bound(struct rds_sock *rs)
{
	unsigned long flags;
	spin_lock_irqsave(&rds_bind_lock, flags);
	if (rs->rs_bound_addr) {
		rdsdebug("rs %p unbinding from %pI4:%d\n",
		  rs, &rs->rs_bound_addr,
		  ntohs(rs->rs_bound_port));
		hlist_del_init_rcu(&rs->rs_bound_node);
		rds_sock_put(rs);
		rs->rs_bound_addr = 0;
	}
	spin_unlock_irqrestore(&rds_bind_lock, flags);
}
/*
 * bind(2) handler for RDS sockets. Validates the sockaddr (IPv4 only,
 * no wildcard address, not already bound), claims a local port via
 * rds_add_bound(), then selects a transport for the address. Returns 0
 * on success, -EINVAL for bad arguments, the rds_add_bound() error, or
 * -EADDRNOTAVAIL if no transport covers the address.
 */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct rds_transport *trans;
	int ret = 0;
	lock_sock(sk);
	if (addr_len != sizeof(struct sockaddr_in) ||
	    sin->sin_family != AF_INET ||
	    rs->rs_bound_addr ||
	    sin->sin_addr.s_addr == htonl(INADDR_ANY)) {
		ret = -EINVAL;
		goto out;
	}
	ret = rds_add_bound(rs, sin->sin_addr.s_addr, &sin->sin_port);
	if (ret)
		goto out;
	trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
	if (!trans) {
		/* No transport for this address: roll back the binding */
		ret = -EADDRNOTAVAIL;
		rds_remove_bound(rs);
		if (printk_ratelimit())
			printk(KERN_INFO "RDS: rds_bind() could not find a transport, "
				"load rds_tcp or rds_rdma?\n");
		goto out;
	}
	rs->rs_transport = trans;
	ret = 0;
out:
	release_sock(sk);
	/* we might have called rds_remove_bound on error */
	if (ret)
		synchronize_rcu();
	return ret;
}
| gpl-2.0 |
Rom-Fellas/android-omap-tuna | drivers/acpi/acpica/nsrepair.c | 2968 | 22242 | /******************************************************************************
*
* Module Name: nsrepair - Repair for objects returned by predefined methods
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acinterp.h"
#include "acpredef.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsrepair")
/*******************************************************************************
*
* This module attempts to repair or convert objects returned by the
* predefined methods to an object type that is expected, as per the ACPI
* specification. The need for this code is dictated by the many machines that
* return incorrect types for the standard predefined methods. Performing these
* conversions here, in one place, eliminates the need for individual ACPI
* device drivers to do the same. Note: Most of these conversions are different
* than the internal object conversion routines used for implicit object
* conversion.
*
* The following conversions can be performed as necessary:
*
* Integer -> String
* Integer -> Buffer
* String -> Integer
* String -> Buffer
* Buffer -> Integer
* Buffer -> String
* Buffer -> Package of Integers
* Package -> Package of one Package
*
* Additional possible repairs:
*
* Required package elements that are NULL replaced by Integer/String/Buffer
* Incorrect standalone package wrapped with required outer package
*
******************************************************************************/
/* Local prototypes */
static acpi_status
acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
union acpi_operand_object **return_object);
static acpi_status
acpi_ns_convert_to_string(union acpi_operand_object *original_object,
union acpi_operand_object **return_object);
static acpi_status
acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
union acpi_operand_object **return_object);
static acpi_status
acpi_ns_convert_to_package(union acpi_operand_object *original_object,
union acpi_operand_object **return_object);
/*******************************************************************************
*
* FUNCTION: acpi_ns_repair_object
*
* PARAMETERS: Data - Pointer to validation data structure
* expected_btypes - Object types expected
* package_index - Index of object within parent package (if
* applicable - ACPI_NOT_PACKAGE_ELEMENT
* otherwise)
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
* RETURN: Status. AE_OK if repair was successful.
*
* DESCRIPTION: Attempt to repair/convert a return object of a type that was
* not expected.
*
******************************************************************************/
acpi_status
acpi_ns_repair_object(struct acpi_predefined_data *data,
		      u32 expected_btypes,
		      u32 package_index,
		      union acpi_operand_object **return_object_ptr)
{
	union acpi_operand_object *return_object = *return_object_ptr;
	union acpi_operand_object *new_object = NULL;
	acpi_status status;
	u32 i;

	/*
	 * Table of possible repairs, tried in fixed priority order:
	 * Integer first, then String, Buffer, and finally Package.
	 */
	static const struct {
		u32 btype;
		acpi_status (*convert) (union acpi_operand_object *,
					union acpi_operand_object **);
	} conversions[] = {
		{ACPI_RTYPE_INTEGER, acpi_ns_convert_to_integer},
		{ACPI_RTYPE_STRING, acpi_ns_convert_to_string},
		{ACPI_RTYPE_BUFFER, acpi_ns_convert_to_buffer},
		{ACPI_RTYPE_PACKAGE, acpi_ns_convert_to_package},
	};

	ACPI_FUNCTION_NAME(ns_repair_object);

	/*
	 * At this point, we know that the type of the returned object was not
	 * one of the expected types for this predefined name. Attempt each
	 * conversion that would yield an expected type until one succeeds.
	 */
	for (i = 0; i < ACPI_ARRAY_LENGTH(conversions); i++) {
		if (!(expected_btypes & conversions[i].btype)) {
			continue;
		}
		status = conversions[i].convert(return_object, &new_object);
		if (ACPI_SUCCESS(status)) {
			goto object_repaired;
		}
	}

	/* We cannot repair this object */

	return (AE_AML_OPERAND_TYPE);

      object_repaired:

	/* Object was successfully repaired */

	/*
	 * If the original object is a package element, we need to:
	 * 1. Set the reference count of the new object to match the
	 *    reference count of the old object.
	 * 2. Decrement the reference count of the original object.
	 */
	if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
		new_object->common.reference_count =
		    return_object->common.reference_count;

		if (return_object->common.reference_count > 1) {
			return_object->common.reference_count--;
		}

		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
				  "%s: Converted %s to expected %s at index %u\n",
				  data->pathname,
				  acpi_ut_get_object_type_name(return_object),
				  acpi_ut_get_object_type_name(new_object),
				  package_index));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
				  "%s: Converted %s to expected %s\n",
				  data->pathname,
				  acpi_ut_get_object_type_name(return_object),
				  acpi_ut_get_object_type_name(new_object)));
	}

	/* Delete old object, install the new return object */

	acpi_ut_remove_reference(return_object);
	*return_object_ptr = new_object;
	data->flags |= ACPI_OBJECT_REPAIRED;
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_convert_to_integer
*
* PARAMETERS: original_object - Object to be converted
* return_object - Where the new converted object is returned
*
* RETURN: Status. AE_OK if conversion was successful.
*
* DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
*
******************************************************************************/
static acpi_status
acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
			   union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object;
	acpi_status status;
	u64 result = 0;
	u32 i;

	switch (original_object->common.type) {
	case ACPI_TYPE_STRING:

		/* String-to-Integer conversion */

		status = acpi_ut_strtoul64(original_object->string.pointer,
					   ACPI_ANY_BASE, &result);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
		break;

	case ACPI_TYPE_BUFFER:

		/* Buffer-to-Integer conversion. Max buffer size is 64 bits. */

		if (original_object->buffer.length > 8) {
			return (AE_AML_OPERAND_TYPE);
		}

		/* Assemble the buffer bytes (little-endian) into the value */

		for (i = 0; i < original_object->buffer.length; i++) {
			result |= ((u64) original_object->buffer.
				   pointer[i] << (i * 8));
		}
		break;

	default:
		return (AE_AML_OPERAND_TYPE);
	}

	new_object = acpi_ut_create_integer_object(result);
	if (!new_object) {
		return (AE_NO_MEMORY);
	}

	*return_object = new_object;
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_convert_to_string
 *
 * PARAMETERS:  original_object     - Object to be converted
 *              return_object       - Where the new converted object is returned
 *
 * RETURN:      Status. AE_OK if conversion was successful.
 *
 * DESCRIPTION: Attempt to convert a Integer/Buffer object to a String.
 *              Integer 0 becomes a NULL (empty) string; a buffer is copied
 *              up to its first NUL byte (or its full length).
 *
 ******************************************************************************/
static acpi_status
acpi_ns_convert_to_string(union acpi_operand_object *original_object,
			  union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object;
	acpi_size length;
	acpi_status status;
	switch (original_object->common.type) {
	case ACPI_TYPE_INTEGER:
		/*
		 * Integer-to-String conversion. Commonly, convert
		 * an integer of value 0 to a NULL string. The last element of
		 * _BIF and _BIX packages occasionally need this fix.
		 */
		if (original_object->integer.value == 0) {
			/* Allocate a new NULL string object */
			new_object = acpi_ut_create_string_object(0);
			if (!new_object) {
				return (AE_NO_MEMORY);
			}
		} else {
			/* Non-zero integer: render it as a hex string */
			status =
			    acpi_ex_convert_to_string(original_object,
						      &new_object,
						      ACPI_IMPLICIT_CONVERT_HEX);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
		}
		break;
	case ACPI_TYPE_BUFFER:
		/*
		 * Buffer-to-String conversion. Use a to_string
		 * conversion, no transform performed on the buffer data. The best
		 * example of this is the _BIF method, where the string data from
		 * the battery is often (incorrectly) returned as buffer object(s).
		 */
		length = 0;
		/* Effective length stops at the first NUL byte, if any */
		while ((length < original_object->buffer.length) &&
		       (original_object->buffer.pointer[length])) {
			length++;
		}
		/* Allocate a new string object */
		new_object = acpi_ut_create_string_object(length);
		if (!new_object) {
			return (AE_NO_MEMORY);
		}
		/*
		 * Copy the raw buffer data with no transform. String is already NULL
		 * terminated at Length+1.
		 */
		ACPI_MEMCPY(new_object->string.pointer,
			    original_object->buffer.pointer, length);
		break;
	default:
		return (AE_AML_OPERAND_TYPE);
	}
	*return_object = new_object;
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ns_convert_to_buffer
 *
 * PARAMETERS:  original_object     - Object to be converted
 *              return_object       - Where the new converted object is returned
 *
 * RETURN:      Status. AE_OK if conversion was successful.
 *
 * DESCRIPTION: Attempt to convert a Integer/String/Package object to a Buffer.
 *              A Package is accepted only if every element is an Integer;
 *              each element becomes one DWORD in the buffer.
 *
 ******************************************************************************/
static acpi_status
acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
			  union acpi_operand_object **return_object)
{
	union acpi_operand_object *new_object;
	acpi_status status;
	union acpi_operand_object **elements;
	u32 *dword_buffer;
	u32 count;
	u32 i;
	switch (original_object->common.type) {
	case ACPI_TYPE_INTEGER:
		/*
		 * Integer-to-Buffer conversion.
		 * Convert the Integer to a packed-byte buffer. _MAT and other
		 * objects need this sometimes, if a read has been performed on a
		 * Field object that is less than or equal to the global integer
		 * size (32 or 64 bits).
		 */
		status =
		    acpi_ex_convert_to_buffer(original_object, &new_object);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
		break;
	case ACPI_TYPE_STRING:
		/* String-to-Buffer conversion. Simple data copy */
		new_object =
		    acpi_ut_create_buffer_object(original_object->string.
						 length);
		if (!new_object) {
			return (AE_NO_MEMORY);
		}
		ACPI_MEMCPY(new_object->buffer.pointer,
			    original_object->string.pointer,
			    original_object->string.length);
		break;
	case ACPI_TYPE_PACKAGE:
		/*
		 * This case is often seen for predefined names that must return a
		 * Buffer object with multiple DWORD integers within. For example,
		 * _FDE and _GTM. The Package can be converted to a Buffer.
		 */
		/* All elements of the Package must be integers */
		elements = original_object->package.elements;
		count = original_object->package.count;
		for (i = 0; i < count; i++) {
			if ((!*elements) ||
			    ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
				return (AE_AML_OPERAND_TYPE);
			}
			elements++;
		}
		/* Create the new buffer object to replace the Package */
		new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
		if (!new_object) {
			return (AE_NO_MEMORY);
		}
		/* Copy the package elements (integers) to the buffer as DWORDs */
		elements = original_object->package.elements;
		dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
		for (i = 0; i < count; i++) {
			/* Truncation to 32 bits per element is intentional */
			*dword_buffer = (u32) (*elements)->integer.value;
			dword_buffer++;
			elements++;
		}
		break;
	default:
		return (AE_AML_OPERAND_TYPE);
	}
	*return_object = new_object;
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_convert_to_package
*
* PARAMETERS: original_object - Object to be converted
* return_object - Where the new converted object is returned
*
* RETURN: Status. AE_OK if conversion was successful.
*
* DESCRIPTION: Attempt to convert a Buffer object to a Package. Each byte of
* the buffer is converted to a single integer package element.
*
******************************************************************************/
static acpi_status
acpi_ns_convert_to_package(union acpi_operand_object *original_object,
			   union acpi_operand_object **return_object)
{
	union acpi_operand_object *package_desc;
	union acpi_operand_object **element_ptr;
	u8 *byte_ptr;
	u32 remaining;

	/* Only a Buffer object can be converted to a Package */

	if (original_object->common.type != ACPI_TYPE_BUFFER) {
		return (AE_AML_OPERAND_TYPE);
	}

	/* Buffer-to-Package conversion: one Integer element per buffer byte */

	remaining = original_object->buffer.length;
	package_desc = acpi_ut_create_package_object(remaining);
	if (!package_desc) {
		return (AE_NO_MEMORY);
	}

	element_ptr = package_desc->package.elements;
	byte_ptr = original_object->buffer.pointer;

	for (; remaining; remaining--, element_ptr++, byte_ptr++) {
		*element_ptr = acpi_ut_create_integer_object((u64) *byte_ptr);
		if (!*element_ptr) {
			/* Drop the partially built package on failure */
			acpi_ut_remove_reference(package_desc);
			return (AE_NO_MEMORY);
		}
	}

	*return_object = package_desc;
	return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_repair_null_element
*
* PARAMETERS: Data - Pointer to validation data structure
* expected_btypes - Object types expected
* package_index - Index of object within parent package (if
* applicable - ACPI_NOT_PACKAGE_ELEMENT
* otherwise)
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
* RETURN: Status. AE_OK if repair was successful.
*
* DESCRIPTION: Attempt to repair a NULL element of a returned Package object.
*
******************************************************************************/
acpi_status
acpi_ns_repair_null_element(struct acpi_predefined_data *data,
			    u32 expected_btypes,
			    u32 package_index,
			    union acpi_operand_object **return_object_ptr)
{
	union acpi_operand_object *repaired_object;

	ACPI_FUNCTION_NAME(ns_repair_null_element);

	/* A non-NULL returned object needs no repair */

	if (*return_object_ptr) {
		return (AE_OK);
	}

	/*
	 * Attempt to repair a NULL element of a Package object. This applies to
	 * predefined names that return a fixed-length package and each element
	 * is required. It does not apply to variable-length packages where NULL
	 * elements are allowed, especially at the end of the package.
	 *
	 * Create a default-valued object of the first acceptable expected
	 * type, tested in priority order: Integer, then String, then Buffer.
	 */
	if (expected_btypes & ACPI_RTYPE_INTEGER) {
		repaired_object = acpi_ut_create_integer_object((u64)0);
	} else if (expected_btypes & ACPI_RTYPE_STRING) {
		repaired_object = acpi_ut_create_string_object(0);
	} else if (expected_btypes & ACPI_RTYPE_BUFFER) {
		repaired_object = acpi_ut_create_buffer_object(0);
	} else {
		/* No other expected type can be repaired */
		return (AE_AML_OPERAND_TYPE);
	}

	if (!repaired_object) {
		return (AE_NO_MEMORY);
	}

	/* Inherit the reference count of the parent Package object */

	repaired_object->common.reference_count =
	    data->parent_package->common.reference_count;

	ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
			  "%s: Converted NULL package element to expected %s at index %u\n",
			  data->pathname,
			  acpi_ut_get_object_type_name(repaired_object),
			  package_index));

	*return_object_ptr = repaired_object;
	data->flags |= ACPI_OBJECT_REPAIRED;
	return (AE_OK);
}
/******************************************************************************
*
* FUNCTION: acpi_ns_remove_null_elements
*
* PARAMETERS: Data - Pointer to validation data structure
* package_type - An acpi_return_package_types value
* obj_desc - A Package object
*
* RETURN: None.
*
* DESCRIPTION: Remove all NULL package elements from packages that contain
* a variable number of sub-packages. For these types of
* packages, NULL elements can be safely removed.
*
*****************************************************************************/
void
acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
			     u8 package_type,
			     union acpi_operand_object *obj_desc)
{
	union acpi_operand_object **read_ptr;
	union acpi_operand_object **write_ptr;
	u32 original_count;
	u32 valid_count;
	u32 remaining;

	ACPI_FUNCTION_NAME(ns_remove_null_elements);

	/*
	 * We can safely remove all NULL elements from these package types:
	 * PTYPE1_VAR packages contain a variable number of simple data types.
	 * PTYPE2 packages contain a variable number of sub-packages.
	 */
	switch (package_type) {
	case ACPI_PTYPE1_VAR:
	case ACPI_PTYPE2:
	case ACPI_PTYPE2_COUNT:
	case ACPI_PTYPE2_PKG_COUNT:
	case ACPI_PTYPE2_FIXED:
	case ACPI_PTYPE2_MIN:
	case ACPI_PTYPE2_REV_FIXED:
		break;

	case ACPI_PTYPE1_FIXED:
	case ACPI_PTYPE1_OPTION:
	default:
		/* Fixed-length package types must keep their NULL slots */
		return;
	}

	original_count = obj_desc->package.count;
	valid_count = original_count;
	read_ptr = obj_desc->package.elements;
	write_ptr = read_ptr;

	/* Compact the element array in place, skipping NULL entries */

	remaining = original_count;
	while (remaining--) {
		if (*read_ptr) {
			*write_ptr++ = *read_ptr;
		} else {
			valid_count--;
		}
		read_ptr++;
	}

	/* Update the parent package if any NULL elements were removed */

	if (valid_count < original_count) {
		ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
				  "%s: Found and removed %u NULL elements\n",
				  data->pathname,
				  (original_count - valid_count)));

		/* NULL terminate list and update the package count */

		*write_ptr = NULL;
		obj_desc->package.count = valid_count;
	}
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_repair_package_list
*
* PARAMETERS: Data - Pointer to validation data structure
* obj_desc_ptr - Pointer to the object to repair. The new
* package object is returned here,
* overwriting the old object.
*
* RETURN: Status, new object in *obj_desc_ptr
*
* DESCRIPTION: Repair a common problem with objects that are defined to return
* a variable-length Package of Packages. If the variable-length
* is one, some BIOS code mistakenly simply declares a single
* Package instead of a Package with one sub-Package. This
* function attempts to repair this error by wrapping a Package
* object around the original Package, creating the correct
* Package with one sub-Package.
*
* Names that can be repaired in this manner include:
* _ALR, _CSD, _HPX, _MLS, _PRT, _PSS, _TRT, TSS
*
******************************************************************************/
acpi_status
acpi_ns_repair_package_list(struct acpi_predefined_data *data,
			    union acpi_operand_object **obj_desc_ptr)
{
	union acpi_operand_object *outer_package;

	ACPI_FUNCTION_NAME(ns_repair_package_list);

	/*
	 * Create the new outer package and populate it. The new package will
	 * have a single element, the lone subpackage.
	 */
	outer_package = acpi_ut_create_package_object(1);
	if (!outer_package) {
		return (AE_NO_MEMORY);
	}

	/* Install the original package as the sole sub-package element */

	outer_package->package.elements[0] = *obj_desc_ptr;

	ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
			  "%s: Repaired incorrectly formed Package\n",
			  data->pathname));

	/* Return the new object in the object pointer */

	*obj_desc_ptr = outer_package;
	data->flags |= ACPI_OBJECT_REPAIRED;
	return (AE_OK);
}
| gpl-2.0 |
DigilentInc/Linux-Digilent-Dev | drivers/input/misc/88pm80x_onkey.c | 3224 | 4382 | /*
* Marvell 88PM80x ONKEY driver
*
* Copyright (C) 2012 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
* Qiao Zhou <zhouqiao@marvell.com>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/mfd/88pm80x.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define PM800_LONG_ONKEY_EN (1 << 0)
#define PM800_LONG_KEY_DELAY (8) /* 1 .. 16 seconds */
#define PM800_LONKEY_PRESS_TIME ((PM800_LONG_KEY_DELAY-1) << 4)
#define PM800_LONKEY_PRESS_TIME_MASK (0xF0)
#define PM800_SW_PDOWN (1 << 5)
/* Per-device state for the 88PM80x power-on key function */
struct pm80x_onkey_info {
	struct input_dev *idev;		/* input device reporting KEY_POWER */
	struct pm80x_chip *pm80x;	/* parent MFD chip (IRQ/regmap owner) */
	struct regmap *map;		/* register map borrowed from pm80x */
	int irq;			/* ONKEY interrupt number */
};
/* 88PM80x gives us an interrupt when ONKEY is held */
/* 88PM80x gives us an interrupt when ONKEY is held */
static irqreturn_t pm80x_onkey_handler(int irq, void *data)
{
	struct pm80x_onkey_info *info = data;
	unsigned int status;
	int err;

	/* Read the ONKEY status bit and forward it as a KEY_POWER event */
	err = regmap_read(info->map, PM800_STATUS_1, &status);
	if (err < 0) {
		dev_err(info->idev->dev.parent, "failed to read status: %d\n", err);
		return IRQ_NONE;
	}

	input_report_key(info->idev, KEY_POWER, status & PM800_ONKEY_STS1);
	input_sync(info->idev);

	return IRQ_HANDLED;
}
static SIMPLE_DEV_PM_OPS(pm80x_onkey_pm_ops, pm80x_dev_suspend,
pm80x_dev_resume);
/*
 * Bind the ONKEY sub-function of a PM80x multi-function chip: allocate
 * driver state and an input device, request the ONKEY interrupt, and
 * enable long-press detection in hardware.
 * Returns 0 on success or a negative errno; resources are unwound via
 * the goto labels in reverse order of acquisition.
 */
static int pm80x_onkey_probe(struct platform_device *pdev)
{
	struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct pm80x_onkey_info *info;
	int err;
	info = kzalloc(sizeof(struct pm80x_onkey_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->pm80x = chip;
	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(&pdev->dev, "No IRQ resource!\n");
		err = -EINVAL;
		goto out;
	}
	/* Reuse the regmap created by the parent MFD driver */
	info->map = info->pm80x->regmap;
	if (!info->map) {
		dev_err(&pdev->dev, "no regmap!\n");
		err = -EINVAL;
		goto out;
	}
	info->idev = input_allocate_device();
	if (!info->idev) {
		dev_err(&pdev->dev, "Failed to allocate input dev\n");
		err = -ENOMEM;
		goto out;
	}
	info->idev->name = "88pm80x_on";
	info->idev->phys = "88pm80x_on/input0";
	info->idev->id.bustype = BUS_I2C;
	info->idev->dev.parent = &pdev->dev;
	/* This device only ever reports the power key */
	info->idev->evbit[0] = BIT_MASK(EV_KEY);
	__set_bit(KEY_POWER, info->idev->keybit);
	err = pm80x_request_irq(info->pm80x, info->irq, pm80x_onkey_handler,
				IRQF_ONESHOT, "onkey", info);
	if (err < 0) {
		dev_err(&pdev->dev, "Failed to request IRQ: #%d: %d\n",
			info->irq, err);
		goto out_reg;
	}
	err = input_register_device(info->idev);
	if (err) {
		dev_err(&pdev->dev, "Can't register input device: %d\n", err);
		goto out_irq;
	}
	platform_set_drvdata(pdev, info);
	/* Enable long onkey detection */
	regmap_update_bits(info->map, PM800_RTC_MISC4, PM800_LONG_ONKEY_EN,
			   PM800_LONG_ONKEY_EN);
	/* Set 8-second interval */
	regmap_update_bits(info->map, PM800_RTC_MISC3,
			   PM800_LONKEY_PRESS_TIME_MASK,
			   PM800_LONKEY_PRESS_TIME);
	/* The power key may wake the system from suspend */
	device_init_wakeup(&pdev->dev, 1);
	return 0;
	/* Error unwind, reverse order of acquisition */
out_irq:
	pm80x_free_irq(info->pm80x, info->irq, info);
out_reg:
	input_free_device(info->idev);
out:
	kfree(info);
	return err;
}
/*
 * Tear down in reverse probe order. input_unregister_device() also
 * frees the input device, so no separate input_free_device() here.
 */
static int pm80x_onkey_remove(struct platform_device *pdev)
{
	struct pm80x_onkey_info *info = platform_get_drvdata(pdev);
	device_init_wakeup(&pdev->dev, 0);
	pm80x_free_irq(info->pm80x, info->irq, info);
	input_unregister_device(info->idev);
	kfree(info);
	return 0;
}
/* Platform driver glue; bound to the "88pm80x-onkey" MFD sub-device */
static struct platform_driver pm80x_onkey_driver = {
	.driver = {
		   .name = "88pm80x-onkey",
		   .owner = THIS_MODULE,
		   .pm = &pm80x_onkey_pm_ops,
		   },
	.probe = pm80x_onkey_probe,
	.remove = pm80x_onkey_remove,
};
module_platform_driver(pm80x_onkey_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Marvell 88PM80x ONKEY driver");
MODULE_AUTHOR("Qiao Zhou <zhouqiao@marvell.com>");
MODULE_ALIAS("platform:88pm80x-onkey");
| gpl-2.0 |
antmicro/linux-tk1 | arch/x86/kernel/step.c | 3480 | 6073 | /*
* x86 single-step support code, common to 32-bit and 64-bit.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
/*
 * Translate the user-visible CS:IP of @child (registers in @regs) into a
 * linear address, handling vm86 mode and LDT-based code segments.
 * Returns -1L for a bogus LDT selector.
 */
unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;
	addr = regs->ip;
	seg = regs->cs & 0xffff;
	if (v8086_mode(regs)) {
		/* vm86: linear = segment * 16 + 16-bit offset */
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}
	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		struct desc_struct *desc;
		unsigned long base;
		/* strip the RPL and TI bits from the selector */
		seg &= ~7UL;
		/* the LDT can be modified concurrently; hold its lock */
		mutex_lock(&child->mm->context.lock);
		if (unlikely((seg >> 3) >= child->mm->context.size))
			addr = -1L; /* bogus selector, access would fault */
		else {
			desc = child->mm->context.ldt + seg;
			base = get_desc_base(desc);
			/* 16-bit code segment? */
			if (!desc->d)
				addr &= 0xffff;
			addr += base;
		}
		mutex_unlock(&child->mm->context.lock);
	}
	return addr;
}
/*
 * Decode the instruction at the task's current IP and report whether it
 * is a popf/iret (possibly behind prefixes) that could itself change
 * EFLAGS.TF. Used so enable_single_step() does not claim ownership of a
 * trap flag the traced program is about to set.
 */
static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[15];	/* max x86 instruction length */
	unsigned long addr = convert_ip_to_linear(child, regs);
	/* final 0 argument = read from the child's address space */
	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf and iret */
		case 0x9d: case 0xcf:
			return 1;
		/* CHECKME: 64 65 */
		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;
#ifdef CONFIG_X86_64
		case 0x40 ... 0x4f:
			if (!user_64bit_mode(regs))
				/* 32-bit mode: register increment */
				return 0;
			/* 64-bit mode: REX prefix */
			continue;
#endif
		/* CHECKME: f2, f3 */
		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}
/*
 * Enable single-stepping. Return nonzero if user mode is not using TF itself.
 */
static int enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	unsigned long oflags;
	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state so we don't wrongly set TIF_FORCED_TF below.
	 * If enable_single_step() was used last and that is what
	 * set TIF_SINGLESTEP, then both TF and TIF_FORCED_TF are
	 * already set and our bookkeeping is fine.
	 */
	if (unlikely(test_tsk_thread_flag(child, TIF_SINGLESTEP)))
		regs->flags |= X86_EFLAGS_TF;
	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.. This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	/* snapshot the flags before we force TF, for the ownership test below */
	oflags = regs->flags;
	/* Set TF on the kernel stack.. */
	regs->flags |= X86_EFLAGS_TF;
	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * Note that if we don't actually execute the popf because
	 * of a signal arriving right now or suchlike, we will lose
	 * track of the fact that it really was "us" that set it.
	 */
	if (is_setting_trap_flag(child, regs)) {
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		return 0;
	}
	/*
	 * If TF was already set, check whether it was us who set it.
	 * If not, we should never attempt a block step.
	 */
	if (oflags & X86_EFLAGS_TF)
		return test_tsk_thread_flag(child, TIF_FORCED_TF);
	set_tsk_thread_flag(child, TIF_FORCED_TF);
	return 1;
}
void set_task_blockstep(struct task_struct *task, bool on)
{
	unsigned long msr_val;

	/*
	 * Ensure irq/preemption can't change debugctl in between.
	 * Note also that both TIF_BLOCKSTEP and debugctl should
	 * be changed atomically wrt preemption.
	 *
	 * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
	 * task is current or it can't be running, otherwise we can race
	 * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
	 * PTRACE_KILL is not safe.
	 */
	local_irq_disable();
	msr_val = get_debugctlmsr();
	if (on) {
		set_tsk_thread_flag(task, TIF_BLOCKSTEP);
		msr_val |= DEBUGCTLMSR_BTF;
	} else {
		clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
		msr_val &= ~DEBUGCTLMSR_BTF;
	}
	/* only poke the MSR when it belongs to the task that is running */
	if (task == current)
		update_debugctlmsr(msr_val);
	local_irq_enable();
}
/*
 * Enable single or block step.
 */
static void enable_step(struct task_struct *child, bool block)
{
	/*
	 * Make sure block stepping (BTF) is not enabled unless it should be.
	 * Note that we don't try to worry about any is_setting_trap_flag()
	 * instructions after the first when using block stepping.
	 * So no one should try to use debugger block stepping in a program
	 * that uses user-mode single stepping itself.
	 */
	bool want_block = enable_single_step(child) && block;

	if (want_block)
		set_task_blockstep(child, true);
	else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);
}
void user_enable_single_step(struct task_struct *child)
{
enable_step(child, 0);
}
void user_enable_block_step(struct task_struct *child)
{
enable_step(child, 1);
}
/*
 * ptrace hook: stop single/block stepping for @child, clearing TF only
 * when this kernel (TIF_FORCED_TF) was the one that set it.
 */
void user_disable_single_step(struct task_struct *child)
{
	/*
	 * Make sure block stepping (BTF) is disabled.
	 */
	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP))
		set_task_blockstep(child, false);
	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	/* But touch TF only if it was set by us.. */
	if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF))
		task_pt_regs(child)->flags &= ~X86_EFLAGS_TF;
}
| gpl-2.0 |
eklovya/android_kernel_lge_p725 | drivers/usb/storage/option_ms.c | 3992 | 4453 | /*
* Driver for Option High Speed Mobile Devices.
*
* (c) 2008 Dan Williams <dcbw@redhat.com>
*
* Inspiration taken from sierra_ms.c by Kevin Lloyd <klloyd@sierrawireless.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/usb.h>
#include <linux/slab.h>
#include "usb.h"
#include "transport.h"
#include "option_ms.h"
#include "debug.h"
#define ZCD_FORCE_MODEM 0x01
#define ZCD_ALLOW_MS 0x02
static unsigned int option_zero_cd = ZCD_FORCE_MODEM;
module_param(option_zero_cd, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(option_zero_cd, "ZeroCD mode (1=Force Modem (default),"
" 2=Allow CD-Rom");
#define RESPONSE_LEN 1024
/*
 * Send the Option-specific "rezero" command that requests a switch from
 * ZeroCD (virtual CD) mode to modem mode. The follow-up response data
 * and CSW reads are best-effort: their status is deliberately ignored.
 * Returns USB_STOR_XFER_GOOD / USB_STOR_XFER_ERROR, or
 * USB_STOR_TRANSPORT_ERROR if the bounce buffer cannot be allocated.
 */
static int option_rezero(struct us_data *us)
{
	/* Bulk-Only CBW (signature "USBC") carrying a REZERO UNIT (0x01) CDB */
	const unsigned char rezero_msg[] = {
	  0x55, 0x53, 0x42, 0x43, 0x78, 0x56, 0x34, 0x12,
	  0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x01,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	char *buffer;
	int result;
	US_DEBUGP("Option MS: %s", "DEVICE MODE SWITCH\n");
	buffer = kzalloc(RESPONSE_LEN, GFP_KERNEL);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;
	memcpy(buffer, rezero_msg, sizeof(rezero_msg));
	result = usb_stor_bulk_transfer_buf(us,
			us->send_bulk_pipe,
			buffer, sizeof(rezero_msg), NULL);
	if (result != USB_STOR_XFER_GOOD) {
		result = USB_STOR_XFER_ERROR;
		goto out;
	}
	/* Some of the devices need to be asked for a response, but we don't
	 * care what that response is.
	 */
	usb_stor_bulk_transfer_buf(us,
			us->recv_bulk_pipe,
			buffer, RESPONSE_LEN, NULL);
	/* Read the CSW */
	usb_stor_bulk_transfer_buf(us,
			us->recv_bulk_pipe,
			buffer, 13, NULL);
	result = USB_STOR_XFER_GOOD;
out:
	kfree(buffer);
	return result;
}
/*
 * Send a SCSI INQUIRY (wrapped in a Bulk-Only CBW) and check whether the
 * vendor field at offset 8 of the response reads "Option" or "ZCOPTION".
 * Returns 0 when the vendor matches (a memcmp() result); nonzero or a
 * USB_STOR_XFER_* error code otherwise.
 */
static int option_inquiry(struct us_data *us)
{
	const unsigned char inquiry_msg[] = {
	  0x55, 0x53, 0x42, 0x43, 0x12, 0x34, 0x56, 0x78,
	  0x24, 0x00, 0x00, 0x00, 0x80, 0x00, 0x06, 0x12,
	  0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
	  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	char *buffer;
	int result;
	US_DEBUGP("Option MS: %s", "device inquiry for vendor name\n");
	buffer = kzalloc(0x24, GFP_KERNEL);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;
	memcpy(buffer, inquiry_msg, sizeof(inquiry_msg));
	result = usb_stor_bulk_transfer_buf(us,
			us->send_bulk_pipe,
			buffer, sizeof(inquiry_msg), NULL);
	if (result != USB_STOR_XFER_GOOD) {
		result = USB_STOR_XFER_ERROR;
		goto out;
	}
	result = usb_stor_bulk_transfer_buf(us,
			us->recv_bulk_pipe,
			buffer, 0x24, NULL);
	if (result != USB_STOR_XFER_GOOD) {
		result = USB_STOR_XFER_ERROR;
		goto out;
	}
	/* the INQUIRY vendor identification field starts at byte 8 */
	result = memcmp(buffer+8, "Option", 6);
	if (result != 0)
		result = memcmp(buffer+8, "ZCOPTION", 8);
	/* Read the CSW */
	usb_stor_bulk_transfer_buf(us,
			us->recv_bulk_pipe,
			buffer, 13, NULL);
out:
	kfree(buffer);
	return result;
}
/*
 * usb-storage init hook for Option ZeroCD devices: confirm via INQUIRY
 * that the device really is an Option part, then (by default) request a
 * switch to modem mode. Returns 0 to let storage binding continue, or a
 * negative errno to abort it.
 */
int option_ms_init(struct us_data *us)
{
	int result;
	US_DEBUGP("Option MS: option_ms_init called\n");
	/* Additional test for vendor information via INQUIRY,
	 * because some vendor/product IDs are ambiguous
	 */
	result = option_inquiry(us);
	if (result != 0) {
		US_DEBUGP("Option MS: vendor is not Option or not determinable,"
			  " no action taken\n");
		return 0;
	} else
		US_DEBUGP("Option MS: this is a genuine Option device,"
			  " proceeding\n");
	/* Force Modem mode */
	if (option_zero_cd == ZCD_FORCE_MODEM) {
		US_DEBUGP("Option MS: %s", "Forcing Modem Mode\n");
		result = option_rezero(us);
		if (result != USB_STOR_XFER_GOOD)
			US_DEBUGP("Option MS: Failed to switch to modem mode.\n");
		/* NOTE(review): -EIO is returned even when the rezero
		 * succeeds -- appears intentional so usb-storage never
		 * binds once a mode switch was requested; confirm */
		return -EIO;
	} else if (option_zero_cd == ZCD_ALLOW_MS) {
		/* Allow Mass Storage mode (keep CD-Rom) */
		US_DEBUGP("Option MS: %s", "Allowing Mass Storage Mode if device"
			  " requests it\n");
	}
	return 0;
}
| gpl-2.0 |
armani-dev/android_kernel_xiaomi_armani | drivers/usb/gadget/omap_udc.c | 4760 | 81980 | /*
* omap_udc.c -- for OMAP full speed udc; most chips support OTG.
*
* Copyright (C) 2004 Texas Instruments, Inc.
* Copyright (C) 2004-2005 David Brownell
*
* OMAP2 & DMA support by Kyungmin Park <kyungmin.park@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#undef DEBUG
#undef VERBOSE
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <asm/mach-types.h>
#include <plat/dma.h>
#include <plat/usb.h>
#include "omap_udc.h"
#undef USB_TRACE
/* bulk DMA seems to be behaving for both IN and OUT */
#define USE_DMA
/* ISO too */
#define USE_ISO
#define DRIVER_DESC "OMAP UDC driver"
#define DRIVER_VERSION "4 October 2004"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#define OMAP2_DMA_CH(ch) (((ch) - 1) << 1)
#define OMAP24XX_DMA(name, ch) (OMAP24XX_DMA_##name + OMAP2_DMA_CH(ch))
/*
* The OMAP UDC needs _very_ early endpoint setup: before enabling the
* D+ pullup to allow enumeration. That's too early for the gadget
* framework to use from usb_endpoint_enable(), which happens after
* enumeration as part of activating an interface. (But if we add an
* optional new "UDC not yet running" state to the gadget driver model,
* even just during driver binding, the endpoint autoconfig logic is the
* natural spot to manufacture new endpoints.)
*
* So instead of using endpoint enable calls to control the hardware setup,
* this driver defines a "fifo mode" parameter. It's used during driver
* initialization to choose among a set of pre-defined endpoint configs.
* See omap_udc_setup() for available modes, or to add others. That code
* lives in an init section, so use this driver as a module if you need
* to change the fifo mode after the kernel boots.
*
* Gadget drivers normally ignore endpoints they don't care about, and
* won't include them in configuration descriptors. That means only
* misbehaving hosts would even notice they exist.
*/
#ifdef USE_ISO
static unsigned fifo_mode = 3;
#else
static unsigned fifo_mode = 0;
#endif
/* "modprobe omap_udc fifo_mode=42", or else as a kernel
* boot parameter "omap_udc:fifo_mode=42"
*/
module_param (fifo_mode, uint, 0);
MODULE_PARM_DESC (fifo_mode, "endpoint configuration");
#ifdef USE_DMA
static bool use_dma = 1;
/* "modprobe omap_udc use_dma=y", or else as a kernel
* boot parameter "omap_udc:use_dma=y"
*/
module_param (use_dma, bool, 0);
MODULE_PARM_DESC (use_dma, "enable/disable DMA");
#else /* !USE_DMA */
/* save a bit of code */
#define use_dma 0
#endif /* !USE_DMA */
static const char driver_name [] = "omap_udc";
static const char driver_desc [] = DRIVER_DESC;
/*-------------------------------------------------------------------------*/
/* there's a notion of "current endpoint" for modifying endpoint
* state, and PIO access to its FIFO.
*/
static void use_ep(struct omap_ep *ep, u16 select)
{
	u16 num = ep->bEndpointAddress & 0x0f;

	/* fold in the direction flag for IN endpoints */
	num |= (ep->bEndpointAddress & USB_DIR_IN) ? UDC_EP_DIR : 0;
	omap_writew(num | select, UDC_EP_NUM);
	/* when select, MUST deselect later !! */
}
static inline void deselect_ep(void)
{
	/* read-modify-write: clear the EP_SEL bit in the endpoint register */
	omap_writew(omap_readw(UDC_EP_NUM) & ~UDC_EP_SEL, UDC_EP_NUM);
	/* 6 wait states before TX will happen */
}
static void dma_channel_claim(struct omap_ep *ep, unsigned preferred);
/*-------------------------------------------------------------------------*/
/*
 * Gadget-facing endpoint enable: validate the descriptor against this
 * endpoint's hardware capabilities, then (under the udc lock) reset the
 * endpoint state, optionally claim a DMA channel for bulk transfers,
 * and pre-enable the RX FIFO for PIO OUT endpoints.
 * Returns 0 or a negative errno.
 */
static int omap_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	struct omap_udc *udc;
	unsigned long flags;
	u16 maxp;
	/* catch various bogus parameters */
	if (!_ep || !desc || ep->desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT
			|| ep->bEndpointAddress != desc->bEndpointAddress
			|| ep->maxpacket < usb_endpoint_maxp(desc)) {
		DBG("%s, bad ep or descriptor\n", __func__);
		return -EINVAL;
	}
	maxp = usb_endpoint_maxp(desc);
	/* bulk endpoints must use exactly the hardware FIFO size */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
				&& maxp != ep->maxpacket)
			|| usb_endpoint_maxp(desc) > ep->maxpacket
			|| !desc->wMaxPacketSize) {
		DBG("%s, bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}
#ifdef USE_ISO
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_ISOC
				&& desc->bInterval != 1)) {
		/* hardware wants period = 1; USB allows 2^(Interval-1) */
		DBG("%s, unsupported ISO period %dms\n", _ep->name,
				1 << (desc->bInterval - 1));
		return -EDOM;
	}
#else
	if (desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		DBG("%s, ISO nyet\n", _ep->name);
		return -EDOM;
	}
#endif
	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
			&& ep->bmAttributes != USB_ENDPOINT_XFER_BULK
			&& desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
		DBG("%s, %s type mismatch\n", __func__, _ep->name);
		return -EINVAL;
	}
	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) {
		DBG("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}
	spin_lock_irqsave(&udc->lock, flags);
	ep->desc = desc;
	ep->irqs = 0;
	ep->stopped = 0;
	ep->ep.maxpacket = maxp;
	/* set endpoint to initial state */
	ep->dma_channel = 0;
	ep->has_dma = 0;
	ep->lch = -1;
	/* clear any stale halt; requires selecting the endpoint first */
	use_ep(ep, UDC_EP_SEL);
	omap_writew(udc->clr_halt, UDC_CTRL);
	ep->ackwait = 0;
	deselect_ep();
	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		list_add(&ep->iso, &udc->iso);
	/* maybe assign a DMA channel to this endpoint */
	if (use_dma && desc->bmAttributes == USB_ENDPOINT_XFER_BULK)
		/* FIXME ISO can dma, but prefers first channel */
		dma_channel_claim(ep, 0);
	/* PIO OUT may RX packets */
	if (desc->bmAttributes != USB_ENDPOINT_XFER_ISOC
			&& !ep->has_dma
			&& !(ep->bEndpointAddress & USB_DIR_IN)) {
		omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
		ep->ackwait = 1 + ep->double_buf;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	VDBG("%s enabled\n", _ep->name);
	return 0;
}
static void nuke(struct omap_ep *, int status);
/*
 * Gadget-facing endpoint disable: abort all queued requests with
 * -ESHUTDOWN, halt the endpoint, and drop it from the ISO list.
 * Returns 0, or -EINVAL if the endpoint was never enabled.
 */
static int omap_ep_disable(struct usb_ep *_ep)
{
	struct omap_ep *ep = container_of(_ep, struct omap_ep, ep);
	unsigned long flags;
	if (!_ep || !ep->desc) {
		DBG("%s, %s not enabled\n", __func__,
			_ep ? ep->ep.name : NULL);
		return -EINVAL;
	}
	spin_lock_irqsave(&ep->udc->lock, flags);
	ep->desc = NULL;
	ep->ep.desc = NULL;
	/* complete every pending request with -ESHUTDOWN */
	nuke (ep, -ESHUTDOWN);
	ep->ep.maxpacket = ep->maxpacket;
	ep->has_dma = 0;
	omap_writew(UDC_SET_HALT, UDC_CTRL);
	list_del_init(&ep->iso);
	del_timer(&ep->timer);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	VDBG("%s disabled\n", _ep->name);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Allocate a usb_request wrapper for this endpoint.
 *
 * Return NULL explicitly when kzalloc() fails: the old code returned
 * &req->req with req == NULL, which only yielded NULL because 'req'
 * happens to be the first member of struct omap_req (and is undefined
 * behavior regardless).
 */
static struct usb_request *
omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct omap_req *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	/* sentinel meaning "no DMA mapping yet" */
	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}
/*
 * Free a request allocated by omap_alloc_request().
 *
 * Do the NULL check before container_of(): computing the container
 * pointer from a NULL request only "worked" because 'req' is the first
 * member of struct omap_req.
 */
static void
omap_free_request(struct usb_ep *ep, struct usb_request *_req)
{
	if (_req)
		kfree(container_of(_req, struct omap_req, req));
}
/*-------------------------------------------------------------------------*/
/*
 * Retire @req from @ep's queue and invoke its completion callback with
 * @status (unless the request already carries a final status).
 * Called with udc->lock held; the lock is dropped around the callback.
 */
static void
done(struct omap_ep *ep, struct omap_req *req, int status)
{
	unsigned stopped = ep->stopped;
	list_del_init(&req->queue);
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;
	if (use_dma && ep->has_dma) {
		if (req->mapped) {
			/* mapping was created by this driver: undo it */
			dma_unmap_single(ep->udc->gadget.dev.parent,
				req->req.dma, req->req.length,
				(ep->bEndpointAddress & USB_DIR_IN)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
			req->req.dma = DMA_ADDR_INVALID;
			req->mapped = 0;
		} else
			/* caller-provided mapping: just sync it for the CPU */
			dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
				req->req.dma, req->req.length,
				(ep->bEndpointAddress & USB_DIR_IN)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
	}
#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG("complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);
	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&ep->udc->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
#define UDC_FIFO_FULL (UDC_NON_ISO_FIFO_FULL | UDC_ISO_FIFO_FULL)
#define UDC_FIFO_UNWRITABLE (UDC_EP_HALTED | UDC_FIFO_FULL)
#define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
static inline int
write_packet(u8 *buf, struct omap_req *req, unsigned max)
{
	unsigned len = min(req->req.length - req->req.actual, max);
	unsigned remaining = len;
	u16 *wp;

	req->req.actual += len;

	/* feed the FIFO 16 bits at a time while the buffer is halfword aligned */
	if (likely((((int)buf) & 1) == 0)) {
		wp = (u16 *)buf;
		for (; remaining >= 2; remaining -= 2)
			omap_writew(*wp++, UDC_DATA);
		buf = (u8 *)wp;
	}
	/* trailing (or unaligned) bytes go out one at a time */
	for (; remaining; remaining--)
		omap_writeb(*buf++, UDC_DATA);
	return len;
}
// FIXME change r/w fifo calling convention
// return: 0 = still running, 1 = completed, negative = errno
/*
 * PIO: push the next packet of @req into the IN FIFO.
 * Returns nonzero (after completing the request via done()) when this
 * was the last packet; 0 while more data remains or the FIFO is not
 * writable right now.
 */
static int write_fifo(struct omap_ep *ep, struct omap_req *req)
{
	u8 *buf;
	unsigned count;
	int is_last;
	u16 ep_stat;
	buf = req->req.buf + req->req.actual;
	prefetch(buf);
	/* PIO-IN isn't double buffered except for iso */
	ep_stat = omap_readw(UDC_STAT_FLG);
	if (ep_stat & UDC_FIFO_UNWRITABLE)
		return 0;
	count = ep->ep.maxpacket;
	count = write_packet(buf, req, count);
	omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
	ep->ackwait = 1;
	/* last packet is often short (sometimes a zlp) */
	if (count != ep->ep.maxpacket)
		is_last = 1;
	else if (req->req.length == req->req.actual
			&& !req->req.zero)
		is_last = 1;
	else
		is_last = 0;
	/* NOTE: requests complete when all IN data is in a
	 * FIFO (or sometimes later, if a zlp was needed).
	 * Use usb_ep_fifo_status() where needed.
	 */
	if (is_last)
		done(ep, req, 0);
	return is_last;
}
static inline int
read_packet(u8 *buf, struct omap_req *req, unsigned avail)
{
	unsigned len = min(req->req.length - req->req.actual, avail);
	unsigned remaining = len;
	u16 *wp;

	req->req.actual += len;

	/* drain the FIFO 16 bits at a time while the buffer is halfword aligned */
	if (likely((((int)buf) & 1) == 0)) {
		wp = (u16 *)buf;
		for (; remaining >= 2; remaining -= 2)
			*wp++ = omap_readw(UDC_DATA);
		buf = (u8 *)wp;
	}
	/* trailing (or unaligned) bytes come in one at a time */
	for (; remaining; remaining--)
		*buf++ = omap_readb(UDC_DATA);
	return len;
}
/* return: 0 = still running, 1 = queue empty, negative = errno */

/* Drain one OUT packet from the endpoint FIFO into @req by PIO.
 * Completes the request on a short packet, on overflow (-EOVERFLOW,
 * with the excess FIFO data flushed), or when the expected length has
 * arrived.  ep0 (address 0) requests are never completed here; the
 * caller (ep0_irq) drives their status stage itself.
 */
static int read_fifo(struct omap_ep *ep, struct omap_req *req)
{
	u8		*buf;
	unsigned	count, avail;
	int		is_last;

	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	for (;;) {
		u16	ep_stat = omap_readw(UDC_STAT_FLG);

		is_last = 0;
		if (ep_stat & FIFO_EMPTY) {
			if (!ep->double_buf)
				break;
			/* double buffered: remember to re-enable the fifo */
			ep->fnf = 1;
		}
		if (ep_stat & UDC_EP_HALTED)
			break;

		if (ep_stat & UDC_FIFO_FULL)
			avail = ep->ep.maxpacket;
		else {
			avail = omap_readw(UDC_RXFSTAT);
			ep->fnf = ep->double_buf;
		}
		count = read_packet(buf, req, avail);

		/* partial packet reads may not be errors */
		if (count < ep->ep.maxpacket) {
			is_last = 1;
			/* overflowed this request? flush extra data */
			if (count != avail) {
				req->req.status = -EOVERFLOW;
				avail -= count;
				while (avail--)
					omap_readw(UDC_DATA);
			}
		} else if (req->req.length == req->req.actual)
			is_last = 1;
		else
			is_last = 0;

		/* ep0: one pass only; ep0_irq completes the request */
		if (!ep->bEndpointAddress)
			break;
		if (is_last)
			done(ep, req, 0);
		break;
	}
	return is_last;
}
/*-------------------------------------------------------------------------*/

/* Bytes the IN-DMA engine has consumed from the buffer starting at
 * @start, derived from the channel's current source position; used on
 * fault/cancel paths to fix up req->req.actual.  Returns 0 on omap15xx
 * (position readout unreliable there) or when the channel hasn't moved.
 */
static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
{
	dma_addr_t	end;

	/* IN-DMA needs this on fault/cancel paths, so 15xx misreports
	 * the last transfer's bytecount by more than a FIFO's worth.
	 */
	if (cpu_is_omap15xx())
		return 0;

	end = omap_get_dma_src_pos(ep->lch);
	if (end == ep->dma_counter)
		return 0;

	/* only 16 position bits are tracked: rebuild a full address and
	 * allow for one 64K wraparound
	 */
	end |= start & (0xffff << 16);
	if (end < start)
		end += 0x10000;
	return end - start;
}
/* Bytes the OUT-DMA engine has stored into the buffer starting at
 * @start, from the channel's current destination position.  On omap15xx
 * the reported position is one byte behind, hence the increment.
 */
static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
{
	dma_addr_t	end;

	end = omap_get_dma_dst_pos(ep->lch);
	if (end == ep->dma_counter)
		return 0;

	/* only 16 position bits are tracked: rebuild a full address and
	 * allow for one 64K wraparound
	 */
	end |= start & (0xffff << 16);
	if (cpu_is_omap15xx())
		end++;
	if (end < start)
		end += 0x10000;
	return end - start;
}
/* Each USB transfer request using DMA maps to one or more DMA transfers.
 * When DMA completion isn't request completion, the UDC continues with
 * the next DMA transfer for that USB transfer.
 */

/* Program and start the next IN (tx) DMA transfer for @req: either a
 * byte-counted final/short transfer (EOT flagged) or up to
 * UDC_TXN_TSC+1 whole packets moved as 16-bit elements.  Records the
 * size of this transfer in req->dma_bytes and enables the TX-done irq.
 */
static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
{
	u16		txdma_ctrl, w;
	unsigned	length = req->req.length - req->req.actual;
	const int	sync_mode = cpu_is_omap15xx()
				? OMAP_DMA_SYNC_FRAME
				: OMAP_DMA_SYNC_ELEMENT;
	int		dma_trigger = 0;

	if (cpu_is_omap24xx())
		dma_trigger = OMAP24XX_DMA(USB_W2FC_TX0, ep->dma_channel);

	/* measure length in either bytes or packets */
	if ((cpu_is_omap16xx() && length <= UDC_TXN_TSC)
			|| (cpu_is_omap24xx() && length < ep->maxpacket)
			|| (cpu_is_omap15xx() && length < ep->maxpacket)) {
		txdma_ctrl = UDC_TXN_EOT | length;
		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
				length, 1, sync_mode, dma_trigger, 0);
	} else {
		length = min(length / ep->maxpacket,
				(unsigned) UDC_TXN_TSC + 1);
		txdma_ctrl = length;
		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
				ep->ep.maxpacket >> 1, length, sync_mode,
				dma_trigger, 0);
		length *= ep->maxpacket;
	}
	omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
		0, 0);

	/* snapshot the start position so dma_src_len() can measure later */
	omap_start_dma(ep->lch);
	ep->dma_counter = omap_get_dma_src_pos(ep->lch);
	w = omap_readw(UDC_DMA_IRQ_EN);
	w |= UDC_TX_DONE_IE(ep->dma_channel);
	omap_writew(w, UDC_DMA_IRQ_EN);
	omap_writew(UDC_TXN_START | txdma_ctrl, UDC_TXDMA(ep->dma_channel));
	req->dma_bytes = length;
}
/* An IN-DMA transfer ended (@status != 0 on abort): account the bytes
 * moved, and either return (more data or a zlp still needed, so the irq
 * path will start another transfer) or disable the TX-done irq and
 * complete the request.
 */
static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status)
{
	u16	w;

	if (status == 0) {
		req->req.actual += req->dma_bytes;

		/* return if this request needs to send data or zlp */
		if (req->req.actual < req->req.length)
			return;
		if (req->req.zero
				&& req->dma_bytes != 0
				&& (req->req.actual % ep->maxpacket) == 0)
			return;
	} else
		/* aborted: count whatever the channel actually consumed */
		req->req.actual += dma_src_len(ep, req->req.dma
				+ req->req.actual);

	/* tx completion */
	omap_stop_dma(ep->lch);
	w = omap_readw(UDC_DMA_IRQ_EN);
	w &= ~UDC_TX_DONE_IE(ep->dma_channel);
	omap_writew(w, UDC_DMA_IRQ_EN);
	done(ep, req, status);
}
/* Program and start the next OUT (rx) DMA transfer for @req: either a
 * small byte-counted transfer (OMAP24xx short-packet case) or up to
 * UDC_RXN_TC+1 whole packets as 16-bit elements.  Enables the RX-EOT
 * irq and the endpoint fifo so the host may send.
 */
static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
{
	unsigned	packets = req->req.length - req->req.actual;
	int		dma_trigger = 0;
	u16		w;

	if (cpu_is_omap24xx())
		dma_trigger = OMAP24XX_DMA(USB_W2FC_RX0, ep->dma_channel);

	/* NOTE: we filtered out "short reads" before, so we know
	 * the buffer has only whole numbers of packets.
	 * except MODE SELECT(6) sent the 24 bytes data in OMAP24XX DMA mode
	 */
	if (cpu_is_omap24xx() && packets < ep->maxpacket) {
		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
				packets, 1, OMAP_DMA_SYNC_ELEMENT,
				dma_trigger, 0);
		req->dma_bytes = packets;
	} else {
		/* set up this DMA transfer, enable the fifo, start */
		packets /= ep->ep.maxpacket;
		packets = min(packets, (unsigned)UDC_RXN_TC + 1);
		req->dma_bytes = packets * ep->ep.maxpacket;
		omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
				ep->ep.maxpacket >> 1, packets,
				OMAP_DMA_SYNC_ELEMENT,
				dma_trigger, 0);
	}
	omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
		OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
		0, 0);
	/* snapshot for dma_dest_len() before anything moves */
	ep->dma_counter = omap_get_dma_dst_pos(ep->lch);

	omap_writew(UDC_RXN_STOP | (packets - 1), UDC_RXDMA(ep->dma_channel));
	w = omap_readw(UDC_DMA_IRQ_EN);
	w |= UDC_RX_EOT_IE(ep->dma_channel);
	omap_writew(w, UDC_DMA_IRQ_EN);
	omap_writew(ep->bEndpointAddress & 0xf, UDC_EP_NUM);
	omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);

	omap_start_dma(ep->lch);
}
/* An OUT-DMA transfer ended (@status != 0 on abort; @one set when the
 * hardware reports a count one byte high): account received bytes,
 * stop the channel on short/aborted transfers, and complete the
 * request unless another full-length transfer is still expected.
 */
static void
finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one)
{
	u16	count, w;

	if (status == 0)
		ep->dma_counter = (u16) (req->req.dma + req->req.actual);
	count = dma_dest_len(ep, req->req.dma + req->req.actual);
	count += req->req.actual;
	if (one)
		count--;
	if (count <= req->req.length)
		req->req.actual = count;

	if (count != req->dma_bytes || status)
		omap_stop_dma(ep->lch);

	/* if this wasn't short, request may need another transfer */
	else if (req->req.actual < req->req.length)
		return;

	/* rx completion */
	w = omap_readw(UDC_DMA_IRQ_EN);
	w &= ~UDC_RX_EOT_IE(ep->dma_channel);
	omap_writew(w, UDC_DMA_IRQ_EN);
	done(ep, req, status);
}
/* Service the UDC's DMA interrupt sources: finish the request at the
 * head of the queue on the endpoint DMAN_STAT points at, ack the irq
 * bit, then start the next queued transfer if one is waiting.
 */
static void dma_irq(struct omap_udc *udc, u16 irq_src)
{
	u16		dman_stat = omap_readw(UDC_DMAN_STAT);
	struct omap_ep	*ep;
	struct omap_req	*req;

	/* IN dma: tx to host */
	if (irq_src & UDC_TXN_DONE) {
		/* IN endpoints live in the upper half of udc->ep[] */
		ep = &udc->ep[16 + UDC_DMA_TX_SRC(dman_stat)];
		ep->irqs++;
		/* can see TXN_DONE after dma abort */
		if (!list_empty(&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			finish_in_dma(ep, req, 0);
		}
		omap_writew(UDC_TXN_DONE, UDC_IRQ_SRC);

		if (!list_empty (&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			next_in_dma(ep, req);
		}
	}

	/* OUT dma: rx from host */
	if (irq_src & UDC_RXN_EOT) {
		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
		ep->irqs++;
		/* can see RXN_EOT after dma abort */
		if (!list_empty(&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB);
		}
		omap_writew(UDC_RXN_EOT, UDC_IRQ_SRC);

		if (!list_empty (&ep->queue)) {
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			next_out_dma(ep, req);
		}
	}

	if (irq_src & UDC_RXN_CNT) {
		ep = &udc->ep[UDC_DMA_RX_SRC(dman_stat)];
		ep->irqs++;
		/* omap15xx does this unasked... */
		VDBG("%s, RX_CNT irq?\n", ep->ep.name);
		omap_writew(UDC_RXN_CNT, UDC_IRQ_SRC);
	}
}
/* OMAP system-DMA error callback for a UDC channel: log the failure.
 * The active transfer is cleaned up by the regular UDC irq paths.
 */
static void dma_error(int lch, u16 ch_status, void *data)
{
	struct omap_ep *failed_ep = data;

	/* if ch_status & OMAP_DMA_DROP_IRQ ... */
	/* if ch_status & OMAP1_DMA_TOUT_IRQ ... */
	ERR("%s dma error, lch %d status %02x\n", failed_ep->ep.name,
			lch, ch_status);

	/* complete current transfer ... */
}
/* Claim UDC DMA channel @channel (1..3; 0 or out-of-range picks any
 * free one) for @ep, wire it to the OMAP system-DMA controller, and
 * restart the endpoint's queue — via DMA when the claim succeeded,
 * else by PIO.  Channel 1 is preferred for ISO endpoints.
 */
static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
{
	u16	reg;
	int	status, restart, is_in;
	int	dma_channel;

	is_in = ep->bEndpointAddress & USB_DIR_IN;
	if (is_in)
		reg = omap_readw(UDC_TXDMA_CFG);
	else
		reg = omap_readw(UDC_RXDMA_CFG);
	reg |= UDC_DMA_REQ;		/* "pulse" activated */

	ep->dma_channel = 0;
	ep->lch = -1;
	if (channel == 0 || channel > 3) {
		/* auto-select: each channel uses one nibble of the cfg reg */
		if ((reg & 0x0f00) == 0)
			channel = 3;
		else if ((reg & 0x00f0) == 0)
			channel = 2;
		else if ((reg & 0x000f) == 0)	/* preferred for ISO */
			channel = 1;
		else {
			status = -EMLINK;
			goto just_restart;
		}
	}
	reg |= (0x0f & ep->bEndpointAddress) << (4 * (channel - 1));
	ep->dma_channel = channel;

	if (is_in) {
		if (cpu_is_omap24xx())
			dma_channel = OMAP24XX_DMA(USB_W2FC_TX0, channel);
		else
			dma_channel = OMAP_DMA_USB_W2FC_TX0 - 1 + channel;
		status = omap_request_dma(dma_channel,
			ep->ep.name, dma_error, ep, &ep->lch);
		if (status == 0) {
			omap_writew(reg, UDC_TXDMA_CFG);
			/* EMIFF or SDRC */
			omap_set_dma_src_burst_mode(ep->lch,
						OMAP_DMA_DATA_BURST_4);
			omap_set_dma_src_data_pack(ep->lch, 1);
			/* TIPB */
			omap_set_dma_dest_params(ep->lch,
				OMAP_DMA_PORT_TIPB,
				OMAP_DMA_AMODE_CONSTANT,
				UDC_DATA_DMA,
				0, 0);
		}
	} else {
		if (cpu_is_omap24xx())
			dma_channel = OMAP24XX_DMA(USB_W2FC_RX0, channel);
		else
			dma_channel = OMAP_DMA_USB_W2FC_RX0 - 1 + channel;
		status = omap_request_dma(dma_channel,
			ep->ep.name, dma_error, ep, &ep->lch);
		if (status == 0) {
			omap_writew(reg, UDC_RXDMA_CFG);
			/* TIPB */
			omap_set_dma_src_params(ep->lch,
				OMAP_DMA_PORT_TIPB,
				OMAP_DMA_AMODE_CONSTANT,
				UDC_DATA_DMA,
				0, 0);
			/* EMIFF or SDRC */
			omap_set_dma_dest_burst_mode(ep->lch,
						OMAP_DMA_DATA_BURST_4);
			omap_set_dma_dest_data_pack(ep->lch, 1);
		}
	}
	if (status)
		ep->dma_channel = 0;
	else {
		ep->has_dma = 1;
		omap_disable_dma_irq(ep->lch, OMAP_DMA_BLOCK_IRQ);

		/* channel type P: hw synch (fifo) */
		if (cpu_class_is_omap1() && !cpu_is_omap15xx())
			omap_set_dma_channel_mode(ep->lch, OMAP_DMA_LCH_P);
	}

just_restart:
	/* restart any queue, even if the claim failed */
	restart = !ep->stopped && !list_empty(&ep->queue);

	if (status)
		DBG("%s no dma channel: %d%s\n", ep->ep.name, status,
			restart ? " (restart)" : "");
	else
		DBG("%s claimed %cxdma%d lch %d%s\n", ep->ep.name,
			is_in ? 't' : 'r',
			ep->dma_channel - 1, ep->lch,
			restart ? " (restart)" : "");

	if (restart) {
		struct omap_req	*req;
		req = container_of(ep->queue.next, struct omap_req, queue);
		if (ep->has_dma)
			(is_in ? next_in_dma : next_out_dma)(ep, req);
		else {
			use_ep(ep, UDC_EP_SEL);
			(is_in ? write_fifo : read_fifo)(ep, req);
			deselect_ep();
			if (!is_in) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
			/* IN: 6 wait states before it'll tx */
		}
	}
}
/* Tear down @ep's claimed DMA channel: cancel the request at the head
 * of the queue (if any), wait for the channel-config nibble to clear so
 * the current packet and fifo drain, then free the system-DMA channel.
 * ep->has_dma is deliberately left set until the endpoint is fully
 * quiesced elsewhere.
 */
static void dma_channel_release(struct omap_ep *ep)
{
	int		shift = 4 * (ep->dma_channel - 1);
	u16		mask = 0x0f << shift;
	struct omap_req	*req;
	int		active;

	/* abort any active usb transfer request */
	if (!list_empty(&ep->queue))
		req = container_of(ep->queue.next, struct omap_req, queue);
	else
		req = NULL;

	active = omap_get_dma_active_status(ep->lch);

	DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
			active ? "active" : "idle",
			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
			ep->dma_channel - 1, req);

	/* NOTE: re-setting RX_REQ/TX_REQ because of a chip bug (before
	 * OMAP 1710 ES2.0) where reading the DMA_CFG can clear them.
	 */

	/* wait till current packet DMA finishes, and fifo empties */
	if (ep->bEndpointAddress & USB_DIR_IN) {
		omap_writew((omap_readw(UDC_TXDMA_CFG) & ~mask) | UDC_DMA_REQ,
					UDC_TXDMA_CFG);

		if (req) {
			finish_in_dma(ep, req, -ECONNRESET);

			/* clear FIFO; hosts probably won't empty it */
			use_ep(ep, UDC_EP_SEL);
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			deselect_ep();
		}
		while (omap_readw(UDC_TXDMA_CFG) & mask)
			udelay(10);
	} else {
		omap_writew((omap_readw(UDC_RXDMA_CFG) & ~mask) | UDC_DMA_REQ,
					UDC_RXDMA_CFG);

		/* dma empties the fifo */
		while (omap_readw(UDC_RXDMA_CFG) & mask)
			udelay(10);
		if (req)
			finish_out_dma(ep, req, -ECONNRESET, 0);
	}
	omap_free_dma(ep->lch);
	ep->dma_channel = 0;
	ep->lch = -1;
	/* has_dma still set, till endpoint is fully quiesced */
}
/*-------------------------------------------------------------------------*/

/* usb_ep_ops.queue: validate and enqueue a request.  DMA-capable
 * endpoints get their buffer mapped/synced first; when the endpoint's
 * queue is idle the transfer is kick-started immediately (ep0 state
 * machine, DMA, or PIO) — otherwise the irq handlers advance the queue.
 */
static int
omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
	struct omap_req	*req = container_of(_req, struct omap_req, req);
	struct omap_udc	*udc;
	unsigned long	flags;
	int		is_iso = 0;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		DBG("%s, bad params\n", __func__);
		return -EINVAL;
	}
	if (!_ep || (!ep->desc && ep->bEndpointAddress)) {
		DBG("%s, bad ep\n", __func__);
		return -EINVAL;
	}
	if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)
			return -EMSGSIZE;
		is_iso = 1;
	}

	/* this isn't bogus, but OMAP DMA isn't the only hardware to
	 * have a hard time with partial packet reads... reject it.
	 * Except OMAP2 can handle the small packets.
	 */
	if (use_dma
			&& ep->has_dma
			&& ep->bEndpointAddress != 0
			&& (ep->bEndpointAddress & USB_DIR_IN) == 0
			&& !cpu_class_is_omap2()
			&& (req->req.length % ep->ep.maxpacket) != 0) {
		DBG("%s, no partial packet OUT reads\n", __func__);
		return -EMSGSIZE;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map (or re-sync) the buffer for DMA before touching the queue */
	if (use_dma && ep->has_dma) {
		if (req->req.dma == DMA_ADDR_INVALID) {
			req->req.dma = dma_map_single(
				ep->udc->gadget.dev.parent,
				req->req.buf,
				req->req.length,
				(ep->bEndpointAddress & USB_DIR_IN)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
			req->mapped = 1;
		} else {
			dma_sync_single_for_device(
				ep->udc->gadget.dev.parent,
				req->req.dma, req->req.length,
				(ep->bEndpointAddress & USB_DIR_IN)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
			req->mapped = 0;
		}
	}

	VDBG("%s queue req %p, len %d buf %p\n",
		ep->ep.name, _req, _req->length, _req->buf);

	spin_lock_irqsave(&udc->lock, flags);

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;

	/* maybe kickstart non-iso i/o queues */
	if (is_iso) {
		u16 w;

		/* iso transfers are clocked off SOF interrupts */
		w = omap_readw(UDC_IRQ_EN);
		w |= UDC_SOF_IE;
		omap_writew(w, UDC_IRQ_EN);
	} else if (list_empty(&ep->queue) && !ep->stopped && !ep->ackwait) {
		int	is_in;

		if (ep->bEndpointAddress == 0) {
			if (!udc->ep0_pending || !list_empty (&ep->queue)) {
				spin_unlock_irqrestore(&udc->lock, flags);
				return -EL2HLT;
			}

			/* empty DATA stage? */
			is_in = udc->ep0_in;
			if (!req->req.length) {

				/* chip became CONFIGURED or ADDRESSED
				 * earlier; drivers may already have queued
				 * requests to non-control endpoints
				 */
				if (udc->ep0_set_config) {
					u16	irq_en = omap_readw(UDC_IRQ_EN);

					irq_en |= UDC_DS_CHG_IE | UDC_EP0_IE;
					if (!udc->ep0_reset_config)
						irq_en |= UDC_EPN_RX_IE
							| UDC_EPN_TX_IE;
					omap_writew(irq_en, UDC_IRQ_EN);
				}

				/* STATUS for zero length DATA stages is
				 * always an IN ... even for IN transfers,
				 * a weird case which seem to stall OMAP.
				 */
				omap_writew(UDC_EP_SEL | UDC_EP_DIR,
						UDC_EP_NUM);
				omap_writew(UDC_CLR_EP, UDC_CTRL);
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);

				/* cleanup */
				udc->ep0_pending = 0;
				done(ep, req, 0);
				req = NULL;

			/* non-empty DATA stage */
			} else if (is_in) {
				omap_writew(UDC_EP_SEL | UDC_EP_DIR,
						UDC_EP_NUM);
			} else {
				/* SETUP handler still running: just queue */
				if (udc->ep0_setup)
					goto irq_wait;
				omap_writew(UDC_EP_SEL, UDC_EP_NUM);
			}
		} else {
			is_in = ep->bEndpointAddress & USB_DIR_IN;
			if (!ep->has_dma)
				use_ep(ep, UDC_EP_SEL);
			/* if ISO: SOF IRQs must be enabled/disabled! */
		}

		if (ep->has_dma)
			(is_in ? next_in_dma : next_out_dma)(ep, req);
		else if (req) {
			if ((is_in ? write_fifo : read_fifo)(ep, req) == 1)
				req = NULL;
			deselect_ep();
			if (!is_in) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
			/* IN: 6 wait states before it'll tx */
		}
	}

irq_wait:
	/* irq handler advances the queue */
	if (req != NULL)
		list_add_tail(&req->queue, &ep->queue);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
/* usb_ep_ops.dequeue: abort one queued request.  If it is the queue
 * head of a DMA endpoint, recycle the channel (release cancels the
 * transfer, re-claim restarts the rest of the queue); otherwise just
 * complete it with -ECONNRESET.
 */
static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
	struct omap_req	*req;
	unsigned long	flags;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->udc->lock, flags);
		return -EINVAL;
	}

	if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) {
		int channel = ep->dma_channel;

		/* releasing the channel cancels the request,
		 * reclaiming the channel restarts the queue
		 */
		dma_channel_release(ep);
		dma_channel_claim(ep, channel);
	} else
		done(ep, req, -ECONNRESET);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/

/* usb_ep_ops.set_halt: ep0 only ever gets a protocol stall; other
 * active non-ISO endpoints can be halted (only when their fifo is
 * empty) or have a halt cleared.  An in-flight DMA channel is recycled
 * around a halt so the queue restarts afterwards.
 */
static int omap_ep_set_halt(struct usb_ep *_ep, int value)
{
	struct omap_ep	*ep = container_of(_ep, struct omap_ep, ep);
	unsigned long	flags;
	int		status = -EOPNOTSUPP;

	spin_lock_irqsave(&ep->udc->lock, flags);

	/* just use protocol stalls for ep0; real halts are annoying */
	if (ep->bEndpointAddress == 0) {
		if (!ep->udc->ep0_pending)
			status = -EINVAL;
		else if (value) {
			if (ep->udc->ep0_set_config) {
				WARNING("error changing config?\n");
				omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			}
			omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
			ep->udc->ep0_pending = 0;
			status = 0;
		} else /* NOP */
			status = 0;

	/* otherwise, all active non-ISO endpoints can halt */
	} else if (ep->bmAttributes != USB_ENDPOINT_XFER_ISOC && ep->desc) {

		/* IN endpoints must already be idle */
		if ((ep->bEndpointAddress & USB_DIR_IN)
				&& !list_empty(&ep->queue)) {
			status = -EAGAIN;
			goto done;
		}

		if (value) {
			int	channel;

			if (use_dma && ep->dma_channel
					&& !list_empty(&ep->queue)) {
				channel = ep->dma_channel;
				dma_channel_release(ep);
			} else
				channel = 0;

			use_ep(ep, UDC_EP_SEL);
			if (omap_readw(UDC_STAT_FLG) & UDC_NON_ISO_FIFO_EMPTY) {
				omap_writew(UDC_SET_HALT, UDC_CTRL);
				status = 0;
			} else
				status = -EAGAIN;
			deselect_ep();

			if (channel)
				dma_channel_claim(ep, channel);
		} else {
			use_ep(ep, 0);
			omap_writew(ep->udc->clr_halt, UDC_CTRL);
			ep->ackwait = 0;
			if (!(ep->bEndpointAddress & USB_DIR_IN)) {
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				ep->ackwait = 1 + ep->double_buf;
			}
		}
	}
done:
	VDBG("%s %s halt stat %d\n", ep->ep.name,
		value ? "set" : "clear", status);

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return status;
}
/* Per-endpoint operations handed to the gadget core; the enable/
 * disable and request alloc/free handlers are defined elsewhere in
 * this file.
 */
static struct usb_ep_ops omap_ep_ops = {
	.enable		= omap_ep_enable,
	.disable	= omap_ep_disable,

	.alloc_request	= omap_alloc_request,
	.free_request	= omap_free_request,

	.queue		= omap_ep_queue,
	.dequeue	= omap_ep_dequeue,

	.set_halt	= omap_ep_set_halt,
	/* fifo_status ... report bytes in fifo */
	/* fifo_flush ... flush fifo */
};
/*-------------------------------------------------------------------------*/

/* usb_gadget_ops.get_frame: report the current USB frame number from
 * the SOF register, or -EL2NSYNC while its timestamp is not yet valid.
 */
static int omap_get_frame(struct usb_gadget *gadget)
{
	u16 sof = omap_readw(UDC_SOF);

	if (!(sof & UDC_TS_OK))
		return -EL2NSYNC;
	return sof & UDC_TS;
}
/* usb_gadget_ops.wakeup: while suspended, signal remote wakeup if the
 * host (or OTG HNP) permits it; while detached, try SRP through the
 * transceiver.  Returns -EHOSTUNREACH when neither applies.
 */
static int omap_wakeup(struct usb_gadget *gadget)
{
	struct omap_udc	*udc;
	unsigned long	flags;
	int		retval = -EHOSTUNREACH;

	udc = container_of(gadget, struct omap_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->devstat & UDC_SUS) {
		/* NOTE: OTG spec erratum says that OTG devices may
		 * issue wakeups without host enable.
		 */
		if (udc->devstat & (UDC_B_HNP_ENABLE|UDC_R_WK_OK)) {
			DBG("remote wakeup...\n");
			omap_writew(UDC_RMT_WKP, UDC_SYSCON2);
			retval = 0;
		}

	/* NOTE: non-OTG systems may use SRP TOO... */
	} else if (!(udc->devstat & UDC_ATT)) {
		if (udc->transceiver)
			retval = otg_start_srp(udc->transceiver->otg);
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return retval;
}
/* usb_gadget_ops.set_selfpowered: reflect the gadget's power source in
 * the UDC_SELF_PWR bit of SYSCON1.
 */
static int
omap_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct omap_udc	*dev;
	unsigned long	flags;
	u16		reg;

	dev = container_of(gadget, struct omap_udc, gadget);
	spin_lock_irqsave(&dev->lock, flags);

	reg = omap_readw(UDC_SYSCON1);
	if (is_selfpowered)
		reg |= UDC_SELF_PWR;
	else
		reg &= ~UDC_SELF_PWR;
	omap_writew(reg, UDC_SYSCON1);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
/* The D+ pullup may be enabled only when a gadget driver is bound,
 * soft-connect is on, and a VBUS session is active.
 */
static int can_pullup(struct omap_udc *udc)
{
	if (!udc->driver)
		return 0;
	return udc->softconnect && udc->vbus_active;
}
/* Enable the D+ pullup so the host can enumerate us.  On non-OTG,
 * non-OMAP15xx parts also drive the B-session-valid bit in OTG_CTRL by
 * software.  Only the device-state-change irq is left enabled until a
 * USB reset arrives (see devstate_irq).
 */
static void pullup_enable(struct omap_udc *udc)
{
	u16 w;

	w = omap_readw(UDC_SYSCON1);
	w |= UDC_PULLUP_EN;
	omap_writew(w, UDC_SYSCON1);
	if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
		u32 l;

		l = omap_readl(OTG_CTRL);
		l |= OTG_BSESSVLD;
		omap_writel(l, OTG_CTRL);
	}
	omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
}
/* Drop the D+ pullup (device disappears from the host).  On non-OTG,
 * non-OMAP15xx parts clear the software B-session-valid bit first;
 * only the device-state-change irq is left enabled.
 */
static void pullup_disable(struct omap_udc *udc)
{
	u16 w;

	if (!gadget_is_otg(&udc->gadget) && !cpu_is_omap15xx()) {
		u32 l;

		l = omap_readl(OTG_CTRL);
		l &= ~OTG_BSESSVLD;
		omap_writel(l, OTG_CTRL);
	}
	omap_writew(UDC_DS_CHG_IE, UDC_IRQ_EN);
	w = omap_readw(UDC_SYSCON1);
	w &= ~UDC_PULLUP_EN;
	omap_writew(w, UDC_SYSCON1);
}
static struct omap_udc *udc;

/* Gate the UDC function clock and HHC clock together; no-op unless
 * both were acquired.  A short delay lets the clocks stabilize after
 * enabling.
 */
static void omap_udc_enable_clock(int enable)
{
	if (!udc || !udc->dc_clk || !udc->hhc_clk)
		return;

	if (!enable) {
		clk_disable(udc->hhc_clk);
		clk_disable(udc->dc_clk);
		return;
	}

	clk_enable(udc->dc_clk);
	clk_enable(udc->hhc_clk);
	udelay(100);
}
/*
 * Called by whatever detects VBUS sessions: external transceiver
 * driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock.
 */
static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct omap_udc	*udc;
	unsigned long	flags;
	u32 l;

	udc = container_of(gadget, struct omap_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);
	VDBG("VBUS %s\n", is_active ? "on" : "off");
	udc->vbus_active = (is_active != 0);
	if (cpu_is_omap15xx()) {
		/* "software" detect, ignored if !VBUS_MODE_1510 */
		l = omap_readl(FUNC_MUX_CTRL_0);
		if (is_active)
			l |= VBUS_CTRL_1510;
		else
			l &= ~VBUS_CTRL_1510;
		omap_writel(l, FUNC_MUX_CTRL_0);
	}
	/* clocks come up before the pullup is (possibly) enabled... */
	if (udc->dc_clk != NULL && is_active) {
		if (!udc->clk_requested) {
			omap_udc_enable_clock(1);
			udc->clk_requested = 1;
		}
	}
	if (can_pullup(udc))
		pullup_enable(udc);
	else
		pullup_disable(udc);
	/* ...and go down only after the pullup is gone */
	if (udc->dc_clk != NULL && !is_active) {
		if (udc->clk_requested) {
			omap_udc_enable_clock(0);
			udc->clk_requested = 0;
		}
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* usb_gadget_ops.vbus_draw: delegate the configured VBUS current draw
 * to the external transceiver (PHY), if one is present.
 */
static int omap_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct omap_udc *dev = container_of(gadget, struct omap_udc, gadget);

	if (!dev->transceiver)
		return -EOPNOTSUPP;
	return usb_phy_set_power(dev->transceiver, mA);
}
/* usb_gadget_ops.pullup: record the soft-connect state and apply the
 * resulting pullup policy (driver bound + softconnect + VBUS).
 */
static int omap_pullup(struct usb_gadget *gadget, int is_on)
{
	struct omap_udc	*dev = container_of(gadget, struct omap_udc, gadget);
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->softconnect = (is_on != 0);
	if (can_pullup(dev))
		pullup_enable(dev);
	else
		pullup_disable(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* forward declarations; udc_start/udc_stop are defined later */
static int omap_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int omap_udc_stop(struct usb_gadget_driver *driver);

/* controller-level operations handed to the gadget core */
static struct usb_gadget_ops omap_gadget_ops = {
	.get_frame		= omap_get_frame,
	.wakeup			= omap_wakeup,
	.set_selfpowered	= omap_set_selfpowered,
	.vbus_session		= omap_vbus_session,
	.vbus_draw		= omap_vbus_draw,
	.pullup			= omap_pullup,
	.start			= omap_udc_start,
	.stop			= omap_udc_stop,
};
/*-------------------------------------------------------------------------*/

/* dequeue ALL requests; caller holds udc->lock */

/* Stop @ep: release any DMA channel, clear its fifo, halt it (except
 * ep0 and iso endpoints), then complete every queued request with
 * @status.
 */
static void nuke(struct omap_ep *ep, int status)
{
	struct omap_req	*req;

	ep->stopped = 1;

	if (use_dma && ep->dma_channel)
		dma_channel_release(ep);

	use_ep(ep, 0);
	omap_writew(UDC_CLR_EP, UDC_CTRL);
	if (ep->bEndpointAddress && ep->bmAttributes != USB_ENDPOINT_XFER_ISOC)
		omap_writew(UDC_SET_HALT, UDC_CTRL);

	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct omap_req, queue);
		done(ep, req, status);
	}
}
/* caller holds udc->lock */

/* Shut down all traffic: mark the link speed unknown and nuke ep0 plus
 * every endpoint on the gadget's list, failing their requests with
 * -ESHUTDOWN.
 */
static void udc_quiesce(struct omap_udc *udc)
{
	struct omap_ep	*ep;

	udc->gadget.speed = USB_SPEED_UNKNOWN;
	nuke(&udc->ep[0], -ESHUTDOWN);
	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list)
		nuke(ep, -ESHUTDOWN);
}
/*-------------------------------------------------------------------------*/

/* Mirror the HNP-related DEVSTAT feature bits into the gadget struct
 * and, when the host enabled b_hnp, arm HNP in OTG_CTRL immediately so
 * the suspend irq path can't race it.  No-op on non-OTG configurations;
 * DEVSTAT is only trusted while OTG_ID says we're the B-device.
 */
static void update_otg(struct omap_udc *udc)
{
	u16	devstat;

	if (!gadget_is_otg(&udc->gadget))
		return;

	if (omap_readl(OTG_CTRL) & OTG_ID)
		devstat = omap_readw(UDC_DEVSTAT);
	else
		devstat = 0;

	udc->gadget.b_hnp_enable = !!(devstat & UDC_B_HNP_ENABLE);
	udc->gadget.a_hnp_support = !!(devstat & UDC_A_HNP_SUPPORT);
	udc->gadget.a_alt_hnp_support = !!(devstat & UDC_A_ALT_HNP_SUPPORT);

	/* Enable HNP early, avoiding races on suspend irq path.
	 * ASSUMES OTG state machine B_BUS_REQ input is true.
	 */
	if (udc->gadget.b_hnp_enable) {
		u32 l;

		l = omap_readl(OTG_CTRL);
		l |= OTG_B_HNPEN | OTG_B_BUSREQ;
		l &= ~OTG_PULLUP;
		omap_writel(l, OTG_CTRL);
	}
}
/* Endpoint-zero interrupt handler.  On a new SETUP it first scrubs any
 * stale ep0 rx/tx state; it then services IN/OUT data and status stages
 * by PIO, decodes the few ch9 requests the hardware doesn't autodecode
 * (SET_CONFIGURATION bookkeeping, endpoint halt set/clear, GET_STATUS),
 * and delegates everything else to the gadget driver's setup() callback
 * — stalling ep0 on any failure.
 */
static void ep0_irq(struct omap_udc *udc, u16 irq_src)
{
	struct omap_ep	*ep0 = &udc->ep[0];
	struct omap_req	*req = NULL;

	ep0->irqs++;

	/* Clear any pending requests and then scrub any rx/tx state
	 * before starting to handle the SETUP request.
	 */
	if (irq_src & UDC_SETUP) {
		u16	ack = irq_src & (UDC_EP0_TX|UDC_EP0_RX);

		nuke(ep0, 0);
		if (ack) {
			omap_writew(ack, UDC_IRQ_SRC);
			irq_src = UDC_SETUP;
		}
	}

	/* IN/OUT packets mean we're in the DATA or STATUS stage.
	 * This driver uses only uses protocol stalls (ep0 never halts),
	 * and if we got this far the gadget driver already had a
	 * chance to stall. Tries to be forgiving of host oddities.
	 *
	 * NOTE: the last chance gadget drivers have to stall control
	 * requests is during their request completion callback.
	 */
	if (!list_empty(&ep0->queue))
		req = container_of(ep0->queue.next, struct omap_req, queue);

	/* IN == TX to host */
	if (irq_src & UDC_EP0_TX) {
		int	stat;

		omap_writew(UDC_EP0_TX, UDC_IRQ_SRC);
		omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
		stat = omap_readw(UDC_STAT_FLG);
		if (stat & UDC_ACK) {
			if (udc->ep0_in) {
				/* write next IN packet from response,
				 * or set up the status stage.
				 */
				if (req)
					stat = write_fifo(ep0, req);
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
				if (!req && udc->ep0_pending) {
					omap_writew(UDC_EP_SEL, UDC_EP_NUM);
					omap_writew(UDC_CLR_EP, UDC_CTRL);
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					omap_writew(0, UDC_EP_NUM);
					udc->ep0_pending = 0;
				} /* else: 6 wait states before it'll tx */
			} else {
				/* ack status stage of OUT transfer */
				omap_writew(UDC_EP_DIR, UDC_EP_NUM);
				if (req)
					done(ep0, req, 0);
			}
			req = NULL;
		} else if (stat & UDC_STALL) {
			omap_writew(UDC_CLR_HALT, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
		} else {
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
		}
	}

	/* OUT == RX from host */
	if (irq_src & UDC_EP0_RX) {
		int	stat;

		omap_writew(UDC_EP0_RX, UDC_IRQ_SRC);
		omap_writew(UDC_EP_SEL, UDC_EP_NUM);
		stat = omap_readw(UDC_STAT_FLG);
		if (stat & UDC_ACK) {
			if (!udc->ep0_in) {
				stat = 0;
				/* read next OUT packet of request, maybe
				 * reactiviting the fifo; stall on errors.
				 */
				if (!req || (stat = read_fifo(ep0, req)) < 0) {
					omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
					udc->ep0_pending = 0;
					stat = 0;
				} else if (stat == 0)
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
				omap_writew(0, UDC_EP_NUM);

				/* activate status stage */
				if (stat == 1) {
					done(ep0, req, 0);
					/* that may have STALLed ep0... */
					omap_writew(UDC_EP_SEL | UDC_EP_DIR,
							UDC_EP_NUM);
					omap_writew(UDC_CLR_EP, UDC_CTRL);
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					omap_writew(UDC_EP_DIR, UDC_EP_NUM);
					udc->ep0_pending = 0;
				}
			} else {
				/* ack status stage of IN transfer */
				omap_writew(0, UDC_EP_NUM);
				if (req)
					done(ep0, req, 0);
			}
		} else if (stat & UDC_STALL) {
			omap_writew(UDC_CLR_HALT, UDC_CTRL);
			omap_writew(0, UDC_EP_NUM);
		} else {
			omap_writew(0, UDC_EP_NUM);
		}
	}

	/* SETUP starts all control transfers */
	if (irq_src & UDC_SETUP) {
		union u {
			u16			word[4];
			struct usb_ctrlrequest	r;
		} u;
		int			status = -EINVAL;
		struct omap_ep		*ep;

		/* read the (latest) SETUP message */
		do {
			omap_writew(UDC_SETUP_SEL, UDC_EP_NUM);
			/* two bytes at a time */
			u.word[0] = omap_readw(UDC_DATA);
			u.word[1] = omap_readw(UDC_DATA);
			u.word[2] = omap_readw(UDC_DATA);
			u.word[3] = omap_readw(UDC_DATA);
			omap_writew(0, UDC_EP_NUM);
		} while (omap_readw(UDC_IRQ_SRC) & UDC_SETUP);

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* Delegate almost all control requests to the gadget driver,
		 * except for a handful of ch9 status/feature requests that
		 * hardware doesn't autodecode _and_ the gadget API hides.
		 */
		udc->ep0_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		udc->ep0_set_config = 0;
		udc->ep0_pending = 1;
		ep0->stopped = 0;
		ep0->ackwait = 0;
		switch (u.r.bRequest) {
		case USB_REQ_SET_CONFIGURATION:
			/* udc needs to know when ep != 0 is valid */
			if (u.r.bRequestType != USB_RECIP_DEVICE)
				goto delegate;
			if (w_length != 0)
				goto do_stall;
			udc->ep0_set_config = 1;
			udc->ep0_reset_config = (w_value == 0);
			VDBG("set config %d\n", w_value);

			/* update udc NOW since gadget driver may start
			 * queueing requests immediately; clear config
			 * later if it fails the request.
			 */
			if (udc->ep0_reset_config)
				omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			else
				omap_writew(UDC_DEV_CFG, UDC_SYSCON2);
			update_otg(udc);
			goto delegate;
		case USB_REQ_CLEAR_FEATURE:
			/* clear endpoint halt */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			ep = &udc->ep[w_index & 0xf];
			if (ep != ep0) {
				/* IN endpoints live 16 entries higher */
				if (w_index & USB_DIR_IN)
					ep += 16;
				if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
						|| !ep->desc)
					goto do_stall;
				use_ep(ep, 0);
				omap_writew(udc->clr_halt, UDC_CTRL);
				ep->ackwait = 0;
				if (!(ep->bEndpointAddress & USB_DIR_IN)) {
					omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
					ep->ackwait = 1 + ep->double_buf;
				}
				/* NOTE: assumes the host behaves sanely,
				 * only clearing real halts. Else we may
				 * need to kill pending transfers and then
				 * restart the queue... very messy for DMA!
				 */
			}
			VDBG("%s halt cleared by host\n", ep->name);
			goto ep0out_status_stage;
		case USB_REQ_SET_FEATURE:
			/* set endpoint halt */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			ep = &udc->ep[w_index & 0xf];
			if (w_index & USB_DIR_IN)
				ep += 16;
			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
					|| ep == ep0 || !ep->desc)
				goto do_stall;
			if (use_dma && ep->has_dma) {
				/* this has rude side-effects (aborts) and
				 * can't really work if DMA-IN is active
				 */
				DBG("%s host set_halt, NYET \n", ep->name);
				goto do_stall;
			}
			use_ep(ep, 0);
			/* can't halt if fifo isn't empty... */
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			omap_writew(UDC_SET_HALT, UDC_CTRL);
			VDBG("%s halted by host\n", ep->name);
ep0out_status_stage:
			status = 0;
			omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
			omap_writew(UDC_CLR_EP, UDC_CTRL);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
			udc->ep0_pending = 0;
			break;
		case USB_REQ_GET_STATUS:
			/* USB_ENDPOINT_HALT status? */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto intf_status;

			/* ep0 never stalls */
			if (!(w_index & 0xf))
				goto zero_status;

			/* only active endpoints count */
			ep = &udc->ep[w_index & 0xf];
			if (w_index & USB_DIR_IN)
				ep += 16;
			if (!ep->desc)
				goto do_stall;

			/* iso never stalls */
			if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
				goto zero_status;

			/* FIXME don't assume non-halted endpoints!! */
			ERR("%s status, can't report\n", ep->ep.name);
			goto do_stall;

intf_status:
			/* return interface status. if we were pedantic,
			 * we'd detect non-existent interfaces, and stall.
			 */
			if (u.r.bRequestType
					!= (USB_DIR_IN|USB_RECIP_INTERFACE))
				goto delegate;

zero_status:
			/* return two zero bytes */
			omap_writew(UDC_EP_SEL|UDC_EP_DIR, UDC_EP_NUM);
			omap_writew(0, UDC_DATA);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			omap_writew(UDC_EP_DIR, UDC_EP_NUM);
			status = 0;
			VDBG("GET_STATUS, interface %d\n", w_index);
			/* next, status stage */
			break;
		default:
delegate:
			/* activate the ep0out fifo right away */
			if (!udc->ep0_in && w_length) {
				omap_writew(0, UDC_EP_NUM);
				omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			}

			/* gadget drivers see class/vendor specific requests,
			 * {SET,GET}_{INTERFACE,DESCRIPTOR,CONFIGURATION},
			 * and more
			 */
			VDBG("SETUP %02x.%02x v%04x i%04x l%04x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length);

#undef	w_value
#undef	w_index
#undef	w_length

			/* The gadget driver may return an error here,
			 * causing an immediate protocol stall.
			 *
			 * Else it must issue a response, either queueing a
			 * response buffer for the DATA stage, or halting ep0
			 * (causing a protocol stall, not a real halt). A
			 * zero length buffer means no DATA stage.
			 *
			 * It's fine to issue that response after the setup()
			 * call returns, and this IRQ was handled.
			 */
			udc->ep0_setup = 1;
			spin_unlock(&udc->lock);
			status = udc->driver->setup (&udc->gadget, &u.r);
			spin_lock(&udc->lock);
			udc->ep0_setup = 0;
		}

		if (status < 0) {
do_stall:
			VDBG("req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, status);
			if (udc->ep0_set_config) {
				if (udc->ep0_reset_config)
					WARNING("error resetting config?\n");
				else
					omap_writew(UDC_CLR_CFG, UDC_SYSCON2);
			}
			omap_writew(UDC_STALL_CMD, UDC_SYSCON2);
			udc->ep0_pending = 0;
		}
	}
}
/*-------------------------------------------------------------------------*/

/* DEVSTAT bits that feed the OTG/HNP handling in update_otg() */
#define OTG_FLAGS (UDC_B_HNP_ENABLE|UDC_A_HNP_SUPPORT|UDC_A_ALT_HNP_SUPPORT)
/*
 * devstate_irq - service a UDC_DS_CHG (device state change) interrupt.
 *
 * Reads DEVSTAT, diffs it against the cached copy to find which bits
 * flipped, then handles attach/detach, end-of-reset, suspend/resume
 * and (on OTG-capable parts) HNP flag changes.  Called from the main
 * IRQ handler with udc->lock held; the lock is dropped around gadget
 * driver callbacks (disconnect/suspend/resume).
 */
static void devstate_irq(struct omap_udc *udc, u16 irq_src)
{
	u16	devstat, change;

	devstat = omap_readw(UDC_DEVSTAT);
	change = devstat ^ udc->devstat;	/* bits that toggled */
	udc->devstat = devstat;

	if (change & (UDC_USB_RESET|UDC_ATT)) {
		/* reset or cable change invalidates all endpoint state */
		udc_quiesce(udc);

		if (change & UDC_ATT) {
			/* driver for any external transceiver will
			 * have called omap_vbus_session() already
			 */
			if (devstat & UDC_ATT) {
				udc->gadget.speed = USB_SPEED_FULL;
				VDBG("connect\n");
				if (!udc->transceiver)
					pullup_enable(udc);
				// if (driver->connect) call it
			} else if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
				udc->gadget.speed = USB_SPEED_UNKNOWN;
				if (!udc->transceiver)
					pullup_disable(udc);
				DBG("disconnect, gadget %s\n",
					udc->driver->driver.name);
				if (udc->driver->disconnect) {
					/* callback may sleep-free logic; drop lock */
					spin_unlock(&udc->lock);
					udc->driver->disconnect(&udc->gadget);
					spin_lock(&udc->lock);
				}
			}
			change &= ~UDC_ATT;
		}

		if (change & UDC_USB_RESET) {
			if (devstat & UDC_USB_RESET) {
				VDBG("RESET=1\n");
			} else {
				udc->gadget.speed = USB_SPEED_FULL;
				INFO("USB reset done, gadget %s\n",
					udc->driver->driver.name);
				/* ep0 traffic is legal from now on */
				omap_writew(UDC_DS_CHG_IE | UDC_EP0_IE,
						UDC_IRQ_EN);
			}
			change &= ~UDC_USB_RESET;
		}
	}

	if (change & UDC_SUS) {
		if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
			// FIXME tell isp1301 to suspend/resume (?)
			if (devstat & UDC_SUS) {
				VDBG("suspend\n");
				update_otg(udc);
				/* HNP could be under way already */
				if (udc->gadget.speed == USB_SPEED_FULL
						&& udc->driver->suspend) {
					spin_unlock(&udc->lock);
					udc->driver->suspend(&udc->gadget);
					spin_lock(&udc->lock);
				}
				if (udc->transceiver)
					usb_phy_set_suspend(
							udc->transceiver, 1);
			} else {
				VDBG("resume\n");
				if (udc->transceiver)
					usb_phy_set_suspend(
							udc->transceiver, 0);
				if (udc->gadget.speed == USB_SPEED_FULL
						&& udc->driver->resume) {
					spin_unlock(&udc->lock);
					udc->driver->resume(&udc->gadget);
					spin_lock(&udc->lock);
				}
			}
		}
		change &= ~UDC_SUS;
	}

	/* 15xx parts have no OTG block; elsewhere fold HNP flag changes in */
	if (!cpu_is_omap15xx() && (change & OTG_FLAGS)) {
		update_otg(udc);
		change &= ~OTG_FLAGS;
	}

	/* config/default/address transitions need no action here */
	change &= ~(UDC_CFG|UDC_DEF|UDC_ADD);
	if (change)
		VDBG("devstat %03x, ignore change %03x\n",
			devstat, change);

	/* ack the device-state-change interrupt */
	omap_writew(UDC_DS_CHG, UDC_IRQ_SRC);
}
/*
 * omap_udc_irq - main UDC interrupt handler.
 *
 * Demultiplexes the shared interrupt into device-state changes, ep0
 * control traffic, and (when enabled) DMA completion events, acking
 * each class of source as it is dispatched.  SOF and non-ep0 PIO bits
 * are intentionally ignored here: PIO has its own IRQ line.
 */
static irqreturn_t omap_udc_irq(int irq, void *_udc)
{
	struct omap_udc	*udc = _udc;
	irqreturn_t	ret = IRQ_NONE;
	unsigned long	flags;
	u16		pending;

	spin_lock_irqsave(&udc->lock, flags);
	pending = omap_readw(UDC_IRQ_SRC);

	/* Device state change (usb ch9 stuff) */
	if (pending & UDC_DS_CHG) {
		devstate_irq(_udc, pending);
		pending &= ~UDC_DS_CHG;
		ret = IRQ_HANDLED;
	}

	/* EP0 control transfers */
	if (pending & (UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX)) {
		ep0_irq(_udc, pending);
		pending &= ~(UDC_EP0_RX|UDC_SETUP|UDC_EP0_TX);
		ret = IRQ_HANDLED;
	}

	/* DMA transfer completion */
	if (use_dma && (pending & (UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT))) {
		dma_irq(_udc, pending);
		pending &= ~(UDC_TXN_DONE|UDC_RXN_CNT|UDC_RXN_EOT);
		ret = IRQ_HANDLED;
	}

	/* anything left (other than SOF / non-ep0 PIO) is unexpected */
	pending &= ~(UDC_IRQ_SOF | UDC_EPN_TX|UDC_EPN_RX);
	if (pending)
		DBG("udc_irq, unhandled %03x\n", pending);
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
/* workaround for seemingly-lost IRQs for RX ACKs... */
#define PIO_OUT_TIMEOUT (jiffies + HZ/3)
#define HALF_FULL(f) (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
/*
 * pio_out_timer - workaround for seemingly-lost IRQs for RX ACKs.
 *
 * Periodically polls an OUT endpoint that is still waiting for an ACK
 * interrupt.  If the FIFO holds a packet we never got an IRQ for,
 * drain it, re-arm the FIFO, and keep the timer running.
 */
static void pio_out_timer(unsigned long _ep)
{
	struct omap_ep	*ep = (void *) _ep;
	unsigned long	flags;
	u16		stat_flg;

	spin_lock_irqsave(&ep->udc->lock, flags);
	if (!list_empty(&ep->queue) && ep->ackwait) {
		use_ep(ep, UDC_EP_SEL);
		stat_flg = omap_readw(UDC_STAT_FLG);

		/* packet ACKed but FIFO not armed (or, for double
		 * buffering, only half full) => the IRQ went missing
		 */
		if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
				|| (ep->double_buf && HALF_FULL(stat_flg)))) {
			struct omap_req	*req;
			VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
			req = container_of(ep->queue.next,
					struct omap_req, queue);
			(void) read_fifo(ep, req);
			omap_writew(ep->bEndpointAddress, UDC_EP_NUM);
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			ep->ackwait = 1 + ep->double_buf;
		} else
			deselect_ep();
	}
	/* keep polling while requests remain queued */
	mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
	spin_unlock_irqrestore(&ep->udc->lock, flags);
}
/*
 * omap_udc_pio_irq - PIO data interrupt for all non-ep0 endpoints.
 *
 * Services at most one OUT (RX) event and otherwise one IN (TX) event
 * per invocation, draining or refilling the selected endpoint FIFO.
 * OUT is serviced first to avoid some wasteful NAKs.
 */
static irqreturn_t omap_udc_pio_irq(int irq, void *_dev)
{
	u16		epn_stat, irq_src;
	irqreturn_t	status = IRQ_NONE;
	struct omap_ep	*ep;
	int		epnum;
	struct omap_udc	*udc = _dev;
	struct omap_req	*req;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);
	epn_stat = omap_readw(UDC_EPN_STAT);
	irq_src = omap_readw(UDC_IRQ_SRC);

	/* handle OUT first, to avoid some wasteful NAKs */
	if (irq_src & UDC_EPN_RX) {
		/* RX endpoint number sits in the high byte of EPN_STAT */
		epnum = (epn_stat >> 8) & 0x0f;
		omap_writew(UDC_EPN_RX, UDC_IRQ_SRC);
		status = IRQ_HANDLED;
		ep = &udc->ep[epnum];
		ep->irqs++;

		omap_writew(epnum | UDC_EP_SEL, UDC_EP_NUM);
		ep->fnf = 0;
		if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
			ep->ackwait--;
			if (!list_empty(&ep->queue)) {
				int stat;
				req = container_of(ep->queue.next,
						struct omap_req, queue);
				stat = read_fifo(ep, req);
				if (!ep->double_buf)
					ep->fnf = 1;
			}
		}
		/* min 6 clock delay before clearing EP_SEL ... */
		epn_stat = omap_readw(UDC_EPN_STAT);
		epn_stat = omap_readw(UDC_EPN_STAT);
		omap_writew(epnum, UDC_EP_NUM);

		/* enabling fifo _after_ clearing ACK, contrary to docs,
		 * reduces lossage; timer still needed though (sigh).
		 */
		if (ep->fnf) {
			omap_writew(UDC_SET_FIFO_EN, UDC_CTRL);
			ep->ackwait = 1 + ep->double_buf;
		}
		/* back the hardware up with the lost-IRQ poll timer */
		mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
	}

	/* then IN transfers */
	else if (irq_src & UDC_EPN_TX) {
		/* TX endpoint number sits in the low byte of EPN_STAT */
		epnum = epn_stat & 0x0f;
		omap_writew(UDC_EPN_TX, UDC_IRQ_SRC);
		status = IRQ_HANDLED;
		/* IN endpoints live in the second half of udc->ep[] */
		ep = &udc->ep[16 + epnum];
		ep->irqs++;

		omap_writew(epnum | UDC_EP_DIR | UDC_EP_SEL, UDC_EP_NUM);
		if (omap_readw(UDC_STAT_FLG) & UDC_ACK) {
			ep->ackwait = 0;
			if (!list_empty(&ep->queue)) {
				req = container_of(ep->queue.next,
						struct omap_req, queue);
				(void) write_fifo(ep, req);
			}
		}
		/* min 6 clock delay before clearing EP_SEL ... */
		epn_stat = omap_readw(UDC_EPN_STAT);
		epn_stat = omap_readw(UDC_EPN_STAT);
		omap_writew(epnum | UDC_EP_DIR, UDC_EP_NUM);
		/* then 6 clocks before it'd tx */
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return status;
}
#ifdef USE_ISO
/*
 * omap_udc_iso_irq - SOF interrupt: service PIO isochronous endpoints.
 *
 * Walks every active ISO endpoint on udc->iso and moves one packet per
 * frame through its FIFO.  When no ISO work remains, the SOF interrupt
 * is masked again to avoid a 1 kHz idle interrupt load.
 */
static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
{
	struct omap_udc	*udc = _dev;
	struct omap_ep	*ep;
	int		pending = 0;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* handle all non-DMA ISO transfers */
	list_for_each_entry (ep, &udc->iso, iso) {
		u16		stat;
		struct omap_req	*req;

		if (ep->has_dma || list_empty(&ep->queue))
			continue;
		req = list_entry(ep->queue.next, struct omap_req, queue);

		use_ep(ep, UDC_EP_SEL);
		stat = omap_readw(UDC_STAT_FLG);

		/* NOTE: like the other controller drivers, this isn't
		 * currently reporting lost or damaged frames.
		 */
		if (ep->bEndpointAddress & USB_DIR_IN) {
			if (stat & UDC_MISS_IN)
				/* done(ep, req, -EPROTO) */;
			else
				write_fifo(ep, req);
		} else {
			int	status = 0;

			if (stat & UDC_NO_RXPACKET)
				status = -EREMOTEIO;
			else if (stat & UDC_ISO_ERR)
				status = -EILSEQ;
			else if (stat & UDC_DATA_FLUSH)
				status = -ENOSR;

			if (status)
				/* done(ep, req, status) */;
			else
				read_fifo(ep, req);
		}
		deselect_ep();
		/* 6 wait states before next EP */

		ep->irqs++;
		if (!list_empty(&ep->queue))
			pending = 1;
	}

	/* nothing queued anywhere: stop taking SOF interrupts */
	if (!pending) {
		u16 w;

		w = omap_readw(UDC_IRQ_EN);
		w &= ~UDC_SOF_IE;
		omap_writew(w, UDC_IRQ_EN);
	}
	omap_writew(UDC_IRQ_SOF, UDC_IRQ_SRC);

	spin_unlock_irqrestore(&udc->lock, flags);
	return IRQ_HANDLED;
}
#endif
/*-------------------------------------------------------------------------*/
/*
 * Boards whose USB connector can't sense VBUS.  On these we must fake
 * VBUS sessions in software, and a live gadget driver keeps the 48 MHz
 * clock requested (blocking deep sleep) since attach can't be detected.
 */
static inline int machine_without_vbus_sense(void)
{
	if (machine_is_omap_innovator())
		return 1;
	if (machine_is_omap_osk())
		return 1;
	if (machine_is_omap_apollon())
		return 1;
#ifndef CONFIG_MACH_OMAP_H4_OTG
	if (machine_is_omap_h4())
		return 1;
#endif
	if (machine_is_sx1())
		return 1;
	/* No known omap7xx boards with vbus sense */
	return cpu_is_omap7xx() != 0;
}
/*
 * omap_udc_start - attach a gadget driver to this controller.
 * @driver: gadget driver being registered; must be full-speed capable
 *	and supply a setup() callback
 * @bind: driver bind callback, invoked once the controller is claimed
 *
 * Resets per-endpoint state, records the driver, binds it, then brings
 * the device onto the bus either via an external transceiver (OTG) or
 * by enabling the D+ pullup directly.
 *
 * Returns 0 on success or a negative errno; on any failure udc->driver
 * is left NULL so a later registration can succeed.
 */
static int omap_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	int		status = -ENODEV;
	struct omap_ep	*ep;
	unsigned long	flags;

	/* basic sanity tests */
	if (!udc)
		return -ENODEV;
	if (!driver
			// FIXME if otg, check:  driver->is_otg
			|| driver->max_speed < USB_SPEED_FULL
			|| !bind || !driver->setup)
		return -EINVAL;

	spin_lock_irqsave(&udc->lock, flags);
	if (udc->driver) {
		spin_unlock_irqrestore(&udc->lock, flags);
		return -EBUSY;
	}

	/* reset state: halt every non-ISO endpoint, clear ep0 */
	list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
		ep->irqs = 0;
		if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC)
			continue;
		use_ep(ep, 0);
		omap_writew(UDC_SET_HALT, UDC_CTRL);
	}
	udc->ep0_pending = 0;
	udc->ep[0].irqs = 0;
	udc->softconnect = 1;

	/* hook up the driver */
	driver->driver.bus = NULL;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;
	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(1);

	status = bind(&udc->gadget);
	if (status) {
		DBG("bind to %s --> %d\n", driver->driver.name, status);
		udc->gadget.dev.driver = NULL;
		udc->driver = NULL;
		goto done;
	}
	DBG("bound to driver %s\n", driver->driver.name);

	omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);

	/* connect to bus through transceiver */
	if (udc->transceiver) {
		status = otg_set_peripheral(udc->transceiver->otg,
						&udc->gadget);
		if (status < 0) {
			ERR("can't bind to transceiver\n");
			if (driver->unbind)
				driver->unbind (&udc->gadget);
			/* Forget the driver unconditionally: the old code
			 * only cleared these inside the unbind branch, so
			 * a driver without an unbind() hook left a stale
			 * binding and every later start got -EBUSY.
			 */
			udc->gadget.dev.driver = NULL;
			udc->driver = NULL;
			goto done;
		}
	} else {
		if (can_pullup(udc))
			pullup_enable (udc);
		else
			pullup_disable (udc);
	}

	/* boards that don't have VBUS sensing can't autogate 48MHz;
	 * can't enter deep sleep while a gadget driver is active.
	 */
	if (machine_without_vbus_sense())
		omap_vbus_session(&udc->gadget, 1);

done:
	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(0);

	return status;
}
/*
 * omap_udc_stop - detach the currently bound gadget driver.
 * @driver: must match udc->driver and provide an unbind() callback
 *
 * Drops the (possibly fake) VBUS session, disconnects from the bus,
 * quiesces the controller under the lock, and unbinds the driver.
 *
 * Returns 0 on success, -ENODEV when no controller exists, -EINVAL on
 * a mismatched or incomplete driver.  (The previous version fell
 * through with its initial -ENODEV even after successful teardown.)
 */
static int omap_udc_stop(struct usb_gadget_driver *driver)
{
	unsigned long	flags;

	if (!udc)
		return -ENODEV;
	if (!driver || driver != udc->driver || !driver->unbind)
		return -EINVAL;

	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(1);

	if (machine_without_vbus_sense())
		omap_vbus_session(&udc->gadget, 0);

	if (udc->transceiver)
		(void) otg_set_peripheral(udc->transceiver->otg, NULL);
	else
		pullup_disable(udc);

	spin_lock_irqsave(&udc->lock, flags);
	udc_quiesce(udc);
	spin_unlock_irqrestore(&udc->lock, flags);
	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
	udc->driver = NULL;

	if (udc->dc_clk != NULL)
		omap_udc_enable_clock(0);

	DBG("unregistered driver '%s'\n", driver->driver.name);
	return 0;
}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
#include <linux/seq_file.h>
static const char proc_filename[] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS
/*
 * proc_ep_show - dump one endpoint's state into the /proc seq_file.
 *
 * Prints the DMA channel (when active), ackwait state, IRQ count,
 * decoded STAT_FLG bits, and then every queued request with its
 * progress.  Caller holds udc->lock.
 */
static void proc_ep_show(struct seq_file *s, struct omap_ep *ep)
{
	u16		stat_flg;
	struct omap_req	*req;
	char		buf[20];

	use_ep(ep, 0);

	if (use_dma && ep->has_dma)
		snprintf(buf, sizeof buf, "(%cxdma%d lch%d) ",
			(ep->bEndpointAddress & USB_DIR_IN) ? 't' : 'r',
			ep->dma_channel - 1, ep->lch);
	else
		buf[0] = 0;

	stat_flg = omap_readw(UDC_STAT_FLG);
	seq_printf(s,
		"\n%s %s%s%sirqs %ld stat %04x " EIGHTBITS FOURBITS "%s\n",
		ep->name, buf,
		ep->double_buf ? "dbuf " : "",
		/* GNU statement expression mapping ackwait to a label */
		({char *s; switch(ep->ackwait){
		case 0: s = ""; break;
		case 1: s = "(ackw) "; break;
		case 2: s = "(ackw2) "; break;
		default: s = "(?) "; break;
		} s;}),
		ep->irqs, stat_flg,
		(stat_flg & UDC_NO_RXPACKET) ? "no_rxpacket " : "",
		(stat_flg & UDC_MISS_IN) ? "miss_in " : "",
		(stat_flg & UDC_DATA_FLUSH) ? "data_flush " : "",
		(stat_flg & UDC_ISO_ERR) ? "iso_err " : "",
		(stat_flg & UDC_ISO_FIFO_EMPTY) ? "iso_fifo_empty " : "",
		(stat_flg & UDC_ISO_FIFO_FULL) ? "iso_fifo_full " : "",
		(stat_flg & UDC_EP_HALTED) ? "HALT " : "",
		(stat_flg & UDC_STALL) ? "STALL " : "",
		(stat_flg & UDC_NAK) ? "NAK " : "",
		(stat_flg & UDC_ACK) ? "ACK " : "",
		(stat_flg & UDC_FIFO_EN) ? "fifo_en " : "",
		(stat_flg & UDC_NON_ISO_FIFO_EMPTY) ? "fifo_empty " : "",
		(stat_flg & UDC_NON_ISO_FIFO_FULL) ? "fifo_full " : "");

	if (list_empty (&ep->queue))
		seq_printf(s, "\t(queue empty)\n");
	else
		list_for_each_entry (req, &ep->queue, queue) {
			unsigned	length = req->req.actual;

			/* first (active) request: add in-flight DMA bytes */
			if (use_dma && buf[0]) {
				length += ((ep->bEndpointAddress & USB_DIR_IN)
						? dma_src_len : dma_dest_len)
					(ep, req->req.dma + length);
				buf[0] = 0;
			}
			seq_printf(s, "\treq %p len %d/%d buf %p\n",
					&req->req, length,
					req->req.length, req->req.buf);
		}
}
/*
 * trx_mode - map an HMC transceiver-mode field to a readable name.
 *
 * Mode 0 is the powerup-default 6-wire setup: shown as "*6wire" when
 * that port is enabled, "unused" otherwise.  Unrecognized values
 * report "unknown".
 */
static char *trx_mode(unsigned m, int enabled)
{
	if (m == 0)
		return enabled ? "*6wire" : "unused";
	if (m == 1)
		return "4wire";
	if (m == 2)
		return "3wire";
	if (m == 3)
		return "6wire";
	return "unknown";
}
/*
 * proc_otg_show - dump the OTG controller registers into the seq_file.
 *
 * Decodes OTG_REV, SYSCON_1/2, OTG_CTRL and the IRQ/test registers.
 * Only meaningful on parts with an OTG block (caller already skips
 * omap15xx).  Always returns 0.
 */
static int proc_otg_show(struct seq_file *s)
{
	u32		tmp;
	u32		trans = 0;
	char		*ctrl_name = "(UNKNOWN)";

	/* XXX This needs major revision for OMAP2+ */
	tmp = omap_readl(OTG_REV);
	if (cpu_class_is_omap1()) {
		ctrl_name = "tranceiver_ctrl";
		trans = omap_readw(USB_TRANSCEIVER_CTRL);
	}
	seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
		tmp >> 4, tmp & 0xf, ctrl_name, trans);
	tmp = omap_readw(OTG_SYSCON_1);
	seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
			FOURBITS "\n", tmp,
		trx_mode(USB2_TRX_MODE(tmp), trans & CONF_USB2_UNI_R),
		trx_mode(USB1_TRX_MODE(tmp), trans & CONF_USB1_UNI_R),
		(USB0_TRX_MODE(tmp) == 0 && !cpu_is_omap1710())
			? "internal"
			: trx_mode(USB0_TRX_MODE(tmp), 1),
		(tmp & OTG_IDLE_EN) ? " !otg" : "",
		(tmp & HST_IDLE_EN) ? " !host" : "",
		(tmp & DEV_IDLE_EN) ? " !dev" : "",
		(tmp & OTG_RESET_DONE) ? " reset_done" : " reset_active");
	tmp = omap_readl(OTG_SYSCON_2);
	seq_printf(s, "otg_syscon2 %08x%s" EIGHTBITS
			" b_ase_brst=%d hmc=%d\n", tmp,
		(tmp & OTG_EN) ? " otg_en" : "",
		(tmp & USBX_SYNCHRO) ? " synchro" : "",
		// much more SRP stuff
		(tmp & SRP_DATA) ? " srp_data" : "",
		(tmp & SRP_VBUS) ? " srp_vbus" : "",
		(tmp & OTG_PADEN) ? " otg_paden" : "",
		(tmp & HMC_PADEN) ? " hmc_paden" : "",
		(tmp & UHOST_EN) ? " uhost_en" : "",
		(tmp & HMC_TLLSPEED) ? " tllspeed" : "",
		(tmp & HMC_TLLATTACH) ? " tllattach" : "",
		B_ASE_BRST(tmp),
		OTG_HMC(tmp));
	tmp = omap_readl(OTG_CTRL);
	seq_printf(s, "otg_ctrl    %06x" EIGHTBITS EIGHTBITS "%s\n", tmp,
		(tmp & OTG_ASESSVLD) ? " asess" : "",
		(tmp & OTG_BSESSEND) ? " bsess_end" : "",
		(tmp & OTG_BSESSVLD) ? " bsess" : "",
		(tmp & OTG_VBUSVLD) ? " vbus" : "",
		(tmp & OTG_ID) ? " id" : "",
		(tmp & OTG_DRIVER_SEL) ? " DEVICE" : " HOST",
		(tmp & OTG_A_SETB_HNPEN) ? " a_setb_hnpen" : "",
		(tmp & OTG_A_BUSREQ) ? " a_bus" : "",
		(tmp & OTG_B_HNPEN) ? " b_hnpen" : "",
		(tmp & OTG_B_BUSREQ) ? " b_bus" : "",
		(tmp & OTG_BUSDROP) ? " busdrop" : "",
		(tmp & OTG_PULLDOWN) ? " down" : "",
		(tmp & OTG_PULLUP) ? " up" : "",
		(tmp & OTG_DRV_VBUS) ? " drv" : "",
		(tmp & OTG_PD_VBUS) ? " pd_vb" : "",
		(tmp & OTG_PU_VBUS) ? " pu_vb" : "",
		(tmp & OTG_PU_ID) ? " pu_id" : ""
		);
	tmp = omap_readw(OTG_IRQ_EN);
	seq_printf(s, "otg_irq_en  %04x" "\n", tmp);
	tmp = omap_readw(OTG_IRQ_SRC);
	seq_printf(s, "otg_irq_src %04x" "\n", tmp);
	tmp = omap_readw(OTG_OUTCTRL);
	seq_printf(s, "otg_outctrl %04x" "\n", tmp);
	tmp = omap_readw(OTG_TEST);
	seq_printf(s, "otg_test    %04x" "\n", tmp);
	return 0;
}
/*
 * proc_udc_show - seq_file show routine for /proc/driver/udc.
 *
 * Dumps driver/transceiver identity, ULPD and OTG registers, then the
 * UDC register file (syscon1, devstat, irq enables/sources, DMA
 * config) and finally per-endpoint state for every enabled endpoint.
 * Skips the register dump while the pullup is disabled, since
 * UDC_PULLUP_EN gates the chip clock.  Always returns 0.
 */
static int proc_udc_show(struct seq_file *s, void *_)
{
	u32		tmp;
	struct omap_ep	*ep;
	unsigned long	flags;

	spin_lock_irqsave(&udc->lock, flags);

	seq_printf(s, "%s, version: " DRIVER_VERSION
#ifdef	USE_ISO
		" (iso)"
#endif
		"%s\n",
		driver_desc,
		use_dma ?  " (dma)" : "");

	tmp = omap_readw(UDC_REV) & 0xff;
	seq_printf(s,
		"UDC rev %d.%d, fifo mode %d, gadget %s\n"
		"hmc %d, transceiver %s\n",
		tmp >> 4, tmp & 0xf,
		fifo_mode,
		udc->driver ? udc->driver->driver.name : "(none)",
		HMC,
		udc->transceiver
			? udc->transceiver->label
			: ((cpu_is_omap1710() || cpu_is_omap24xx())
					? "external" : "(none)"));
	if (cpu_class_is_omap1()) {
		seq_printf(s, "ULPD control %04x req %04x status %04x\n",
			omap_readw(ULPD_CLOCK_CTRL),
			omap_readw(ULPD_SOFT_REQ),
			omap_readw(ULPD_STATUS_REQ));
	}

	/* OTG controller registers */
	if (!cpu_is_omap15xx())
		proc_otg_show(s);

	tmp = omap_readw(UDC_SYSCON1);
	seq_printf(s, "\nsyscon1     %04x" EIGHTBITS "\n", tmp,
		(tmp & UDC_CFG_LOCK) ? " cfg_lock" : "",
		(tmp & UDC_DATA_ENDIAN) ? " data_endian" : "",
		(tmp & UDC_DMA_ENDIAN) ? " dma_endian" : "",
		(tmp & UDC_NAK_EN) ? " nak" : "",
		(tmp & UDC_AUTODECODE_DIS) ? " autodecode_dis" : "",
		(tmp & UDC_SELF_PWR) ? " self_pwr" : "",
		(tmp & UDC_SOFF_DIS) ? " soff_dis" : "",
		(tmp & UDC_PULLUP_EN) ? " PULLUP" : "");
	// syscon2 is write-only

	/* UDC controller registers */
	if (!(tmp & UDC_PULLUP_EN)) {
		/* pullup off => chip clock gated; registers unreadable */
		seq_printf(s, "(suspended)\n");
		spin_unlock_irqrestore(&udc->lock, flags);
		return 0;
	}

	tmp = omap_readw(UDC_DEVSTAT);
	seq_printf(s, "devstat     %04x" EIGHTBITS "%s%s\n", tmp,
		(tmp & UDC_B_HNP_ENABLE) ? " b_hnp" : "",
		(tmp & UDC_A_HNP_SUPPORT) ? " a_hnp" : "",
		(tmp & UDC_A_ALT_HNP_SUPPORT) ? " a_alt_hnp" : "",
		(tmp & UDC_R_WK_OK) ? " r_wk_ok" : "",
		(tmp & UDC_USB_RESET) ? " usb_reset" : "",
		(tmp & UDC_SUS) ? " SUS" : "",
		(tmp & UDC_CFG) ? " CFG" : "",
		(tmp & UDC_ADD) ? " ADD" : "",
		(tmp & UDC_DEF) ? " DEF" : "",
		(tmp & UDC_ATT) ? " ATT" : "");
	seq_printf(s, "sof         %04x\n", omap_readw(UDC_SOF));
	tmp = omap_readw(UDC_IRQ_EN);
	seq_printf(s, "irq_en      %04x" FOURBITS "%s\n", tmp,
		(tmp & UDC_SOF_IE) ? " sof" : "",
		(tmp & UDC_EPN_RX_IE) ? " epn_rx" : "",
		(tmp & UDC_EPN_TX_IE) ? " epn_tx" : "",
		(tmp & UDC_DS_CHG_IE) ? " ds_chg" : "",
		(tmp & UDC_EP0_IE) ? " ep0" : "");
	tmp = omap_readw(UDC_IRQ_SRC);
	seq_printf(s, "irq_src     %04x" EIGHTBITS "%s%s\n", tmp,
		(tmp & UDC_TXN_DONE) ? " txn_done" : "",
		(tmp & UDC_RXN_CNT) ? " rxn_cnt" : "",
		(tmp & UDC_RXN_EOT) ? " rxn_eot" : "",
		(tmp & UDC_IRQ_SOF) ? " sof" : "",
		(tmp & UDC_EPN_RX) ? " epn_rx" : "",
		(tmp & UDC_EPN_TX) ? " epn_tx" : "",
		(tmp & UDC_DS_CHG) ? " ds_chg" : "",
		(tmp & UDC_SETUP) ? " setup" : "",
		(tmp & UDC_EP0_RX) ? " ep0out" : "",
		(tmp & UDC_EP0_TX) ? " ep0in" : "");
	if (use_dma) {
		unsigned i;

		tmp = omap_readw(UDC_DMA_IRQ_EN);
		seq_printf(s, "dma_irq_en  %04x%s" EIGHTBITS "\n", tmp,
			(tmp & UDC_TX_DONE_IE(3)) ? " tx2_done" : "",
			(tmp & UDC_RX_CNT_IE(3)) ? " rx2_cnt" : "",
			(tmp & UDC_RX_EOT_IE(3)) ? " rx2_eot" : "",

			(tmp & UDC_TX_DONE_IE(2)) ? " tx1_done" : "",
			(tmp & UDC_RX_CNT_IE(2)) ? " rx1_cnt" : "",
			(tmp & UDC_RX_EOT_IE(2)) ? " rx1_eot" : "",

			(tmp & UDC_TX_DONE_IE(1)) ? " tx0_done" : "",
			(tmp & UDC_RX_CNT_IE(1)) ? " rx0_cnt" : "",
			(tmp & UDC_RX_EOT_IE(1)) ? " rx0_eot" : "");

		tmp = omap_readw(UDC_RXDMA_CFG);
		seq_printf(s, "rxdma_cfg   %04x\n", tmp);
		if (tmp) {
			/* three RX DMA channels, 4 config bits each */
			for (i = 0; i < 3; i++) {
				if ((tmp & (0x0f << (i * 4))) == 0)
					continue;
				seq_printf(s, "rxdma[%d]    %04x\n", i,
						omap_readw(UDC_RXDMA(i + 1)));
			}
		}
		tmp = omap_readw(UDC_TXDMA_CFG);
		seq_printf(s, "txdma_cfg   %04x\n", tmp);
		if (tmp) {
			for (i = 0; i < 3; i++) {
				if (!(tmp & (0x0f << (i * 4))))
					continue;
				seq_printf(s, "txdma[%d]    %04x\n", i,
						omap_readw(UDC_TXDMA(i + 1)));
			}
		}
	}

	tmp = omap_readw(UDC_DEVSTAT);
	if (tmp & UDC_ATT) {
		proc_ep_show(s, &udc->ep[0]);
		if (tmp & UDC_ADD) {
			list_for_each_entry (ep, &udc->gadget.ep_list,
					ep.ep_list) {
				if (ep->desc)
					proc_ep_show(s, ep);
			}
		}
	}
	spin_unlock_irqrestore(&udc->lock, flags);
	return 0;
}
/* open() for /proc/driver/udc: single-shot seq_file, no private data */
static int proc_udc_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_udc_show, NULL);
}
/* file_operations for the read-only /proc/driver/udc debug dump */
static const struct file_operations proc_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_udc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* register /proc/driver/udc (debug builds only; see #else stubs) */
static void create_proc_file(void)
{
	proc_create(proc_filename, 0, NULL, &proc_ops);
}
/* tear down /proc/driver/udc on module removal / probe failure */
static void remove_proc_file(void)
{
	remove_proc_entry(proc_filename, NULL);
}
#else

/* without CONFIG_USB_GADGET_DEBUG_FILES the /proc support compiles away */
static inline void create_proc_file(void) {}
static inline void remove_proc_file(void) {}

#endif
/*-------------------------------------------------------------------------*/
/* Before this controller can enumerate, we need to pick an endpoint
* configuration, or "fifo_mode" That involves allocating 2KB of packet
* buffer space among the endpoints we'll be operating.
*
* NOTE: as of OMAP 1710 ES2.0, writing a new endpoint config when
* UDC_SYSCON_1.CFG_LOCK is set can now work. We won't use that
* capability yet though.
*/
/*
 * omap_ep_setup - configure one endpoint's hardware and driver state.
 * @name: endpoint name (copied into ep->name)
 * @addr: endpoint address; USB_DIR_IN selects the IN half of udc->ep[]
 * @type: USB_ENDPOINT_XFER_* transfer type
 * @buf: byte offset of this endpoint's packet buffer in the 2KB pool
 * @maxp: max packet size (must be one of the hardware-supported sizes)
 * @dbuf: request double buffering (forced for ISO, dropped for PIO/15xx)
 *
 * Writes the EP_RX/EP_TX config register, initializes the PIO-out
 * workaround timer for non-ISO endpoints, fills in struct omap_ep, and
 * links the endpoint onto the gadget's ep_list.
 *
 * Returns the buffer offset for the next endpoint (i.e. @buf advanced
 * past this endpoint's single or double buffer).
 */
static unsigned __init
omap_ep_setup(char *name, u8 addr, u8 type,
		unsigned buf, unsigned maxp, int dbuf)
{
	struct omap_ep	*ep;
	u16		epn_rxtx = 0;

	/* OUT endpoints first, then IN */
	ep = &udc->ep[addr & 0xf];
	if (addr & USB_DIR_IN)
		ep += 16;

	/* in case of ep init table bugs */
	BUG_ON(ep->name[0]);

	/* chip setup ... bit values are same for IN, OUT */
	if (type == USB_ENDPOINT_XFER_ISOC) {
		/* ISO size field: bits 14:12 encode log2(maxp/8) */
		switch (maxp) {
		case 8:		epn_rxtx = 0 << 12; break;
		case 16:	epn_rxtx = 1 << 12; break;
		case 32:	epn_rxtx = 2 << 12; break;
		case 64:	epn_rxtx = 3 << 12; break;
		case 128:	epn_rxtx = 4 << 12; break;
		case 256:	epn_rxtx = 5 << 12; break;
		case 512:	epn_rxtx = 6 << 12; break;
		default:	BUG();
		}
		epn_rxtx |= UDC_EPN_RX_ISO;
		dbuf = 1;
	} else {
		/* double-buffering "not supported" on 15xx,
		 * and ignored for PIO-IN on newer chips
		 * (for more reliable behavior)
		 */
		if (!use_dma || cpu_is_omap15xx() || cpu_is_omap24xx())
			dbuf = 0;

		switch (maxp) {
		case 8:		epn_rxtx = 0 << 12; break;
		case 16:	epn_rxtx = 1 << 12; break;
		case 32:	epn_rxtx = 2 << 12; break;
		case 64:	epn_rxtx = 3 << 12; break;
		default:	BUG();
		}
		if (dbuf && addr)
			epn_rxtx |= UDC_EPN_RX_DB;
		/* lost-ACK-IRQ workaround timer (see pio_out_timer) */
		init_timer(&ep->timer);
		ep->timer.function = pio_out_timer;
		ep->timer.data = (unsigned long) ep;
	}
	if (addr)
		epn_rxtx |= UDC_EPN_RX_VALID;
	/* buffer pointer field holds the offset in 8-byte units */
	BUG_ON(buf & 0x07);
	epn_rxtx |= buf >> 3;

	DBG("%s addr %02x rxtx %04x maxp %d%s buf %d\n",
		name, addr, epn_rxtx, maxp, dbuf ? "x2" : "", buf);

	if (addr & USB_DIR_IN)
		omap_writew(epn_rxtx, UDC_EP_TX(addr & 0xf));
	else
		omap_writew(epn_rxtx, UDC_EP_RX(addr));

	/* next endpoint's buffer starts after this one's */
	buf += maxp;
	if (dbuf)
		buf += maxp;
	BUG_ON(buf > 2048);

	/* set up driver data structures */
	BUG_ON(strlen(name) >= sizeof ep->name);
	strlcpy(ep->name, name, sizeof ep->name);
	INIT_LIST_HEAD(&ep->queue);
	INIT_LIST_HEAD(&ep->iso);
	ep->bEndpointAddress = addr;
	ep->bmAttributes = type;
	ep->double_buf = dbuf;
	ep->udc = udc;

	ep->ep.name = ep->name;
	ep->ep.ops = &omap_ep_ops;
	ep->ep.maxpacket = ep->maxpacket = maxp;
	list_add_tail (&ep->ep.ep_list, &udc->gadget.ep_list);

	return buf;
}
/* gadget.dev release hook: frees the controller state once the last
 * device reference drops, then signals omap_udc_remove()'s completion.
 */
static void omap_udc_release(struct device *dev)
{
	complete(udc->done);
	kfree (udc);
	udc = NULL;
}
/*
 * omap_udc_setup - allocate the controller state and lay out endpoints.
 * @odev: the platform device being probed
 * @xceiv: optional external transceiver (ownership recorded in udc)
 *
 * Resets the hardware to a quiescent state, allocates the global udc
 * structure, and partitions the 2KB packet buffer among the endpoints
 * selected by the module's fifo_mode parameter.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV for
 * an unsupported fifo_mode.
 */
static int __init
omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
{
	unsigned	tmp, buf;

	/* abolish any previous hardware state */
	omap_writew(0, UDC_SYSCON1);
	omap_writew(0, UDC_IRQ_EN);
	omap_writew(UDC_IRQ_SRC_MASK, UDC_IRQ_SRC);
	omap_writew(0, UDC_DMA_IRQ_EN);
	omap_writew(0, UDC_RXDMA_CFG);
	omap_writew(0, UDC_TXDMA_CFG);

	/* UDC_PULLUP_EN gates the chip clock */
	// OTG_SYSCON_1 |= DEV_IDLE_EN;

	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	spin_lock_init (&udc->lock);

	udc->gadget.ops = &omap_gadget_ops;
	udc->gadget.ep0 = &udc->ep[0].ep;
	INIT_LIST_HEAD(&udc->gadget.ep_list);
	INIT_LIST_HEAD(&udc->iso);
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	udc->gadget.max_speed = USB_SPEED_FULL;
	udc->gadget.name = driver_name;

	device_initialize(&udc->gadget.dev);
	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.release = omap_udc_release;
	udc->gadget.dev.parent = &odev->dev;
	if (use_dma)
		udc->gadget.dev.dma_mask = odev->dev.dma_mask;

	udc->transceiver = xceiv;

	/* ep0 is special; put it right after the SETUP buffer */
	buf = omap_ep_setup("ep0", 0, USB_ENDPOINT_XFER_CONTROL,
			8 /* after SETUP */, 64 /* maxpacket */, 0);
	list_del_init(&udc->ep[0].ep.ep_list);

	/* initially disable all non-ep0 endpoints */
	for (tmp = 1; tmp < 15; tmp++) {
		omap_writew(0, UDC_EP_RX(tmp));
		omap_writew(0, UDC_EP_TX(tmp));
	}

/* convenience wrappers: each advances 'buf' past the endpoint's space */
#define OMAP_BULK_EP(name,addr) \
	buf = omap_ep_setup(name "-bulk", addr, \
			USB_ENDPOINT_XFER_BULK, buf, 64, 1);
#define OMAP_INT_EP(name,addr, maxp) \
	buf = omap_ep_setup(name "-int", addr, \
			USB_ENDPOINT_XFER_INT, buf, maxp, 0);
#define OMAP_ISO_EP(name,addr, maxp) \
	buf = omap_ep_setup(name "-iso", addr, \
			USB_ENDPOINT_XFER_ISOC, buf, maxp, 1);

	switch (fifo_mode) {
	case 0:
		OMAP_BULK_EP("ep1in",	USB_DIR_IN  | 1);
		OMAP_BULK_EP("ep2out",	USB_DIR_OUT | 2);
		OMAP_INT_EP("ep3in",	USB_DIR_IN  | 3, 16);
		break;
	case 1:
		OMAP_BULK_EP("ep1in",	USB_DIR_IN  | 1);
		OMAP_BULK_EP("ep2out",	USB_DIR_OUT | 2);
		OMAP_INT_EP("ep9in",	USB_DIR_IN  | 9, 16);

		OMAP_BULK_EP("ep3in",	USB_DIR_IN  | 3);
		OMAP_BULK_EP("ep4out",	USB_DIR_OUT | 4);
		OMAP_INT_EP("ep10in",	USB_DIR_IN  | 10, 16);

		OMAP_BULK_EP("ep5in",	USB_DIR_IN  | 5);
		OMAP_BULK_EP("ep5out",	USB_DIR_OUT | 5);
		OMAP_INT_EP("ep11in",	USB_DIR_IN  | 11, 16);

		OMAP_BULK_EP("ep6in",	USB_DIR_IN  | 6);
		OMAP_BULK_EP("ep6out",	USB_DIR_OUT | 6);
		OMAP_INT_EP("ep12in",	USB_DIR_IN  | 12, 16);

		OMAP_BULK_EP("ep7in",	USB_DIR_IN  | 7);
		OMAP_BULK_EP("ep7out",	USB_DIR_OUT | 7);
		OMAP_INT_EP("ep13in",	USB_DIR_IN  | 13, 16);
		OMAP_INT_EP("ep13out",	USB_DIR_OUT | 13, 16);

		OMAP_BULK_EP("ep8in",	USB_DIR_IN  | 8);
		OMAP_BULK_EP("ep8out",	USB_DIR_OUT | 8);
		OMAP_INT_EP("ep14in",	USB_DIR_IN  | 14, 16);
		OMAP_INT_EP("ep14out",	USB_DIR_OUT | 14, 16);

		OMAP_BULK_EP("ep15in",	USB_DIR_IN  | 15);
		OMAP_BULK_EP("ep15out",	USB_DIR_OUT | 15);

		break;

#ifdef	USE_ISO
	case 2:			/* mixed iso/bulk */
		OMAP_ISO_EP("ep1in",	USB_DIR_IN  | 1, 256);
		OMAP_ISO_EP("ep2out",	USB_DIR_OUT | 2, 256);
		OMAP_ISO_EP("ep3in",	USB_DIR_IN  | 3, 128);
		OMAP_ISO_EP("ep4out",	USB_DIR_OUT | 4, 128);

		OMAP_INT_EP("ep5in",	USB_DIR_IN  | 5, 16);

		OMAP_BULK_EP("ep6in",	USB_DIR_IN  | 6);
		OMAP_BULK_EP("ep7out",	USB_DIR_OUT | 7);
		OMAP_INT_EP("ep8in",	USB_DIR_IN  | 8, 16);
		break;
	case 3:			/* mixed bulk/iso */
		OMAP_BULK_EP("ep1in",	USB_DIR_IN  | 1);
		OMAP_BULK_EP("ep2out",	USB_DIR_OUT | 2);
		OMAP_INT_EP("ep3in",	USB_DIR_IN  | 3, 16);

		OMAP_BULK_EP("ep4in",	USB_DIR_IN  | 4);
		OMAP_BULK_EP("ep5out",	USB_DIR_OUT | 5);
		OMAP_INT_EP("ep6in",	USB_DIR_IN  | 6, 16);

		OMAP_ISO_EP("ep7in",	USB_DIR_IN  | 7, 256);
		OMAP_ISO_EP("ep8out",	USB_DIR_OUT | 8, 256);
		OMAP_INT_EP("ep9in",	USB_DIR_IN  | 9, 16);
		break;
#endif

	/* add more modes as needed */

	default:
		ERR("unsupported fifo_mode #%d\n", fifo_mode);
		return -ENODEV;
	}
	omap_writew(UDC_CFG_LOCK|UDC_SELF_PWR, UDC_SYSCON1);
	INFO("fifo mode %d, %d bytes not used\n", fifo_mode, 2048 - buf);
	return 0;
}
/*
 * omap_udc_probe - platform probe: claim resources and register gadget.
 *
 * Claims the register region and clocks, identifies the transceiver
 * arrangement from the HMC mode, builds the udc state via
 * omap_udc_setup(), requests the three IRQ lines (general, PIO,
 * optionally ISO), then registers the gadget device.  Errors unwind
 * through the cleanup labels in reverse acquisition order.
 */
static int __init omap_udc_probe(struct platform_device *pdev)
{
	int			status = -ENODEV;
	int			hmc;
	struct usb_phy		*xceiv = NULL;
	const char		*type = NULL;
	struct omap_usb_config	*config = pdev->dev.platform_data;
	struct clk		*dc_clk;
	struct clk		*hhc_clk;

	/* NOTE:  "knows" the order of the resources! */
	if (!request_mem_region(pdev->resource[0].start,
			pdev->resource[0].end - pdev->resource[0].start + 1,
			driver_name)) {
		DBG("request_mem_region failed\n");
		return -EBUSY;
	}

	if (cpu_is_omap16xx()) {
		dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
		hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
		BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
		/* can't use omap_udc_enable_clock yet */
		clk_enable(dc_clk);
		clk_enable(hhc_clk);
		udelay(100);
	}

	if (cpu_is_omap24xx()) {
		dc_clk = clk_get(&pdev->dev, "usb_fck");
		hhc_clk = clk_get(&pdev->dev, "usb_l4_ick");
		BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
		/* can't use omap_udc_enable_clock yet */
		clk_enable(dc_clk);
		clk_enable(hhc_clk);
		udelay(100);
	}

	if (cpu_is_omap7xx()) {
		dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
		hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck");
		BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
		/* can't use omap_udc_enable_clock yet */
		clk_enable(dc_clk);
		clk_enable(hhc_clk);
		udelay(100);
	}

	INFO("OMAP UDC rev %d.%d%s\n",
		omap_readw(UDC_REV) >> 4, omap_readw(UDC_REV) & 0xf,
		config->otg ? ", Mini-AB" : "");

	/* use the mode given to us by board init code */
	if (cpu_is_omap15xx()) {
		hmc = HMC_1510;
		type = "(unknown)";

		if (machine_without_vbus_sense()) {
			/* just set up software VBUS detect, and then
			 * later rig it so we always report VBUS.
			 * FIXME without really sensing VBUS, we can't
			 * know when to turn PULLUP_EN on/off; and that
			 * means we always "need" the 48MHz clock.
			 */
			u32 tmp = omap_readl(FUNC_MUX_CTRL_0);
			tmp &= ~VBUS_CTRL_1510;
			omap_writel(tmp, FUNC_MUX_CTRL_0);
			tmp |= VBUS_MODE_1510;
			tmp &= ~VBUS_CTRL_1510;
			omap_writel(tmp, FUNC_MUX_CTRL_0);
		}
	} else {
		/* The transceiver may package some GPIO logic or handle
		 * loopback and/or transceiverless setup; if we find one,
		 * use it.  Except for OTG, we don't _need_ to talk to one;
		 * but not having one probably means no VBUS detection.
		 */
		xceiv = usb_get_transceiver();
		if (xceiv)
			type = xceiv->label;
		else if (config->otg) {
			DBG("OTG requires external transceiver!\n");
			goto cleanup0;
		}

		hmc = HMC_1610;

		if (cpu_is_omap24xx()) {
			/* this could be transceiverless in one of the
			 * "we don't need to know" modes.
			 */
			type = "external";
			goto known;
		}

		switch (hmc) {
		case 0:			/* POWERUP DEFAULT == 0 */
		case 4:
		case 12:
		case 20:
			if (!cpu_is_omap1710()) {
				type = "integrated";
				break;
			}
			/* FALL THROUGH */
		case 3:
		case 11:
		case 16:
		case 19:
		case 25:
			if (!xceiv) {
				DBG("external transceiver not registered!\n");
				type = "unknown";
			}
			break;
		case 21:			/* internal loopback */
			type = "loopback";
			break;
		case 14:			/* transceiverless */
			if (cpu_is_omap1710())
				goto bad_on_1710;
			/* FALL THROUGH */
		case 13:
		case 15:
			type = "no";
			break;

		default:
bad_on_1710:
			ERR("unrecognized UDC HMC mode %d\n", hmc);
			goto cleanup0;
		}
	}
known:
	INFO("hmc mode %d, %s transceiver\n", hmc, type);

	/* a "gadget" abstracts/virtualizes the controller */
	status = omap_udc_setup(pdev, xceiv);
	if (status) {
		goto cleanup0;
	}
	/* udc now owns the transceiver reference (udc->transceiver);
	 * NOTE(review): the cleanup1 path below frees udc without a
	 * usb_put_transceiver() on it -- looks like a refcount leak;
	 * confirm against usb_get_transceiver() semantics.
	 */
	xceiv = NULL;
	// "udc" is now valid
	pullup_disable(udc);
#if	defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
	udc->gadget.is_otg = (config->otg != 0);
#endif

	/* starting with omap1710 es2.0, clear toggle is a separate bit */
	if (omap_readw(UDC_REV) >= 0x61)
		udc->clr_halt = UDC_RESET_EP | UDC_CLRDATA_TOGGLE;
	else
		udc->clr_halt = UDC_RESET_EP;

	/* USB general purpose IRQ:  ep0, state changes, dma, etc */
	status = request_irq(pdev->resource[1].start, omap_udc_irq,
			IRQF_SAMPLE_RANDOM, driver_name, udc);
	if (status != 0) {
		ERR("can't get irq %d, err %d\n",
			(int) pdev->resource[1].start, status);
		goto cleanup1;
	}

	/* USB "non-iso" IRQ (PIO for all but ep0) */
	status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
			IRQF_SAMPLE_RANDOM, "omap_udc pio", udc);
	if (status != 0) {
		ERR("can't get irq %d, err %d\n",
			(int) pdev->resource[2].start, status);
		goto cleanup2;
	}
#ifdef	USE_ISO
	status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
			0, "omap_udc iso", udc);
	if (status != 0) {
		ERR("can't get irq %d, err %d\n",
			(int) pdev->resource[3].start, status);
		goto cleanup3;
	}
#endif
	if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
		udc->dc_clk = dc_clk;
		udc->hhc_clk = hhc_clk;
		clk_disable(hhc_clk);
		clk_disable(dc_clk);
	}

	if (cpu_is_omap24xx()) {
		udc->dc_clk = dc_clk;
		udc->hhc_clk = hhc_clk;
		/* FIXME OMAP2 don't release hhc & dc clock */
#if 0
		clk_disable(hhc_clk);
		clk_disable(dc_clk);
#endif
	}

	create_proc_file();
	status = device_add(&udc->gadget.dev);
	if (status)
		goto cleanup4;

	status = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
	if (!status)
		return status;
	/* If fail, fall through */
cleanup4:
	remove_proc_file();

#ifdef	USE_ISO
cleanup3:
	free_irq(pdev->resource[2].start, udc);
#endif

cleanup2:
	free_irq(pdev->resource[1].start, udc);

cleanup1:
	kfree (udc);
	udc = NULL;

cleanup0:
	if (xceiv)
		usb_put_transceiver(xceiv);

	/* NOTE(review): on the omap15xx path dc_clk/hhc_clk were never
	 * clk_get()ed; the cpu_is_* guard keeps this correct, but the
	 * variables are formally uninitialized on that path.
	 */
	if (cpu_is_omap16xx() || cpu_is_omap24xx() || cpu_is_omap7xx()) {
		clk_disable(hhc_clk);
		clk_disable(dc_clk);
		clk_put(hhc_clk);
		clk_put(dc_clk);
	}

	release_mem_region(pdev->resource[0].start,
			pdev->resource[0].end - pdev->resource[0].start + 1);

	return status;
}
/*
 * omap_udc_remove - platform remove: undo omap_udc_probe().
 *
 * Refuses removal while a gadget driver is still bound.  Releases the
 * transceiver, IRQs, clocks and register region, unregisters the
 * gadget device, and waits for omap_udc_release() to free udc (it
 * completes the on-stack 'done' completion set up here).
 */
static int __exit omap_udc_remove(struct platform_device *pdev)
{
	DECLARE_COMPLETION_ONSTACK(done);

	if (!udc)
		return -ENODEV;

	usb_del_gadget_udc(&udc->gadget);
	if (udc->driver)
		return -EBUSY;

	udc->done = &done;

	pullup_disable(udc);
	if (udc->transceiver) {
		usb_put_transceiver(udc->transceiver);
		udc->transceiver = NULL;
	}
	omap_writew(0, UDC_SYSCON1);

	remove_proc_file();

#ifdef	USE_ISO
	free_irq(pdev->resource[3].start, udc);
#endif
	free_irq(pdev->resource[2].start, udc);
	free_irq(pdev->resource[1].start, udc);

	if (udc->dc_clk) {
		if (udc->clk_requested)
			omap_udc_enable_clock(0);
		clk_put(udc->hhc_clk);
		clk_put(udc->dc_clk);
	}

	release_mem_region(pdev->resource[0].start,
			pdev->resource[0].end - pdev->resource[0].start + 1);

	device_unregister(&udc->gadget.dev);
	/* omap_udc_release() signals this once the last ref drops */
	wait_for_completion(&done);

	return 0;
}
/* suspend/resume/wakeup from sysfs (echo > power/state) or when the
* system is forced into deep sleep
*
* REVISIT we should probably reject suspend requests when there's a host
* session active, rather than disconnecting, at least on boards that can
* report VBUS irqs (UDC_DEVSTAT.UDC_ATT). And in any case, we need to
* make host resumes and VBUS detection trigger OMAP wakeup events; that
* may involve talking to an external transceiver (e.g. isp1301).
*/
/*
 * Platform suspend hook.  If a host session is active (attached and
 * not already USB-suspended) we cannot stay connected through deep
 * sleep, so warn and drop the D+ pullup to force a disconnect.
 */
static int omap_udc_suspend(struct platform_device *dev, pm_message_t message)
{
	u32 devstat;

	devstat = omap_readw(UDC_DEVSTAT);

	/* we're requesting 48 MHz clock if the pullup is enabled
	 * (== we're attached to the host) and we're not suspended,
	 * which would prevent entry to deep sleep...
	 */
	if ((devstat & UDC_ATT) != 0 && (devstat & UDC_SUS) == 0) {
		WARNING("session active; suspend requires disconnect\n");
		omap_pullup(&udc->gadget, 0);
	}

	return 0;
}

/*
 * Platform resume hook: re-enable the pullup, give the host time to
 * notice us, then attempt remote wakeup / SRP.
 */
static int omap_udc_resume(struct platform_device *dev)
{
	DBG("resume + wakeup/SRP\n");
	omap_pullup(&udc->gadget, 1);

	/* maybe the host would enumerate us if we nudged it */
	msleep(100);
	return omap_wakeup(&udc->gadget);
}
/*-------------------------------------------------------------------------*/
/* Platform driver glue; probing is done via platform_driver_probe(). */
static struct platform_driver udc_driver = {
	.remove		= __exit_p(omap_udc_remove),
	.suspend	= omap_udc_suspend,
	.resume		= omap_udc_resume,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= (char *) driver_name,
	},
};
/*
 * Module init: print the driver banner and register the platform
 * driver.  DMA is force-disabled on omap7xx, where it is known broken.
 */
static int __init udc_init(void)
{
	/* Disable DMA for omap7xx -- it doesn't work right. */
	if (cpu_is_omap7xx())
		use_dma = 0;

	INFO("%s, version: " DRIVER_VERSION
#ifdef USE_ISO
		" (iso)"
#endif
		"%s\n", driver_desc,
		use_dma ? " (dma)" : "");
	return platform_driver_probe(&udc_driver, omap_udc_probe);
}
module_init(udc_init);

static void __exit udc_exit(void)
{
	platform_driver_unregister(&udc_driver);
}
module_exit(udc_exit);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap_udc");
| gpl-2.0 |
sayeed99/android_kernel_asus_moorefield | arch/arm/mach-omap1/io.c | 4760 | 3859 | /*
* linux/arch/arm/mach-omap1/io.c
*
* OMAP1 I/O mapping code
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/tlb.h>
#include <asm/mach/map.h>
#include <mach/mux.h>
#include <mach/tc.h>
#include <linux/omap-dma.h>
#include "iomap.h"
#include "common.h"
#include "clock.h"
/*
* The machine specific code may provide the extra mapping besides the
* default mapping provided here.
*/
/* Fixed 1:1 device mapping of the OMAP1 IO region, used by all variants. */
static struct map_desc omap_io_desc[] __initdata = {
	{
		.virtual	= OMAP1_IO_VIRT,
		.pfn		= __phys_to_pfn(OMAP1_IO_PHYS),
		.length		= OMAP1_IO_SIZE,
		.type		= MT_DEVICE
	}
};

#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
/* DSP memory and DSP register windows for OMAP730/OMAP850. */
static struct map_desc omap7xx_io_desc[] __initdata = {
	{
		.virtual	= OMAP7XX_DSP_BASE,
		.pfn		= __phys_to_pfn(OMAP7XX_DSP_START),
		.length		= OMAP7XX_DSP_SIZE,
		.type		= MT_DEVICE
	}, {
		.virtual	= OMAP7XX_DSPREG_BASE,
		.pfn		= __phys_to_pfn(OMAP7XX_DSPREG_START),
		.length		= OMAP7XX_DSPREG_SIZE,
		.type		= MT_DEVICE
	}
};
#endif

#ifdef CONFIG_ARCH_OMAP15XX
/* DSP memory and DSP register windows for OMAP1510. */
static struct map_desc omap1510_io_desc[] __initdata = {
	{
		.virtual	= OMAP1510_DSP_BASE,
		.pfn		= __phys_to_pfn(OMAP1510_DSP_START),
		.length		= OMAP1510_DSP_SIZE,
		.type		= MT_DEVICE
	}, {
		.virtual	= OMAP1510_DSPREG_BASE,
		.pfn		= __phys_to_pfn(OMAP1510_DSPREG_START),
		.length		= OMAP1510_DSPREG_SIZE,
		.type		= MT_DEVICE
	}
};
#endif

#if defined(CONFIG_ARCH_OMAP16XX)
/* DSP memory and DSP register windows for OMAP16xx. */
static struct map_desc omap16xx_io_desc[] __initdata = {
	{
		.virtual	= OMAP16XX_DSP_BASE,
		.pfn		= __phys_to_pfn(OMAP16XX_DSP_START),
		.length		= OMAP16XX_DSP_SIZE,
		.type		= MT_DEVICE
	}, {
		.virtual	= OMAP16XX_DSPREG_BASE,
		.pfn		= __phys_to_pfn(OMAP16XX_DSPREG_START),
		.length		= OMAP16XX_DSPREG_SIZE,
		.type		= MT_DEVICE
	}
};
#endif
/*
* Maps common IO regions for omap1
*/
/* Map the IO regions common to every OMAP1 variant. */
static void __init omap1_map_common_io(void)
{
	iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc));
}

#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
/* Common mappings plus the OMAP730/850 DSP windows. */
void __init omap7xx_map_io(void)
{
	omap1_map_common_io();
	iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc));
}
#endif

#ifdef CONFIG_ARCH_OMAP15XX
/* Common mappings plus the OMAP1510 DSP windows. */
void __init omap15xx_map_io(void)
{
	omap1_map_common_io();
	iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
}
#endif

#if defined(CONFIG_ARCH_OMAP16XX)
/* Common mappings plus the OMAP16xx DSP windows. */
void __init omap16xx_map_io(void)
{
	omap1_map_common_io();
	iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc));
}
#endif
/*
* Common low-level hardware init for omap1.
*/
/*
 * Common low-level hardware init for omap1: detect the chip revision,
 * disable TIPB bridge posted-write timeouts (errata workaround below),
 * then bring up clocks and pin multiplexing.
 */
void __init omap1_init_early(void)
{
	omap_check_revision();

	/* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
	 * on a Posted Write in the TIPB Bridge".
	 */
	omap_writew(0x0, MPU_PUBLIC_TIPB_CNTL);
	omap_writew(0x0, MPU_PRIVATE_TIPB_CNTL);

	/* Must init clocks early to assure that timer interrupt works
	 */
	omap1_clk_init();
	omap1_mux_init();
}

/* Late init: currently only serial wakeup configuration. */
void __init omap1_init_late(void)
{
	omap_serial_wakeup_init();
}
/*
* NOTE: Please use ioremap + __raw_read/write where possible instead of these
*/
/* 8/16/32-bit reads from a physical address in the fixed IO mapping. */
u8 omap_readb(u32 pa)
{
	return __raw_readb(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readb);

u16 omap_readw(u32 pa)
{
	return __raw_readw(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readw);

u32 omap_readl(u32 pa)
{
	return __raw_readl(OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_readl);

/* 8/16/32-bit writes to a physical address in the fixed IO mapping. */
void omap_writeb(u8 v, u32 pa)
{
	__raw_writeb(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writeb);

void omap_writew(u16 v, u32 pa)
{
	__raw_writew(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writew);

void omap_writel(u32 v, u32 pa)
{
	__raw_writel(v, OMAP1_IO_ADDRESS(pa));
}
EXPORT_SYMBOL(omap_writel);
| gpl-2.0 |
TeamRegular/android_kernel_samsung_exynos5420 | drivers/net/tokenring/abyss.c | 4760 | 11175 | /*
* abyss.c: Network driver for the Madge Smart 16/4 PCI Mk2 token ring card.
*
* Written 1999-2000 by Adam Fritzler
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* This driver module supports the following cards:
* - Madge Smart 16/4 PCI Mk2
*
* Maintainer(s):
* AF Adam Fritzler
*
* Modification History:
* 30-Dec-99 AF Split off from the tms380tr driver.
* 22-Jan-00 AF Updated to use indirect read/writes
* 23-Nov-00 JG New PCI API, cleanups
*
*
* TODO:
* 1. See if we can use MMIO instead of inb/outb/inw/outw
* 2. Add support for Mk1 (has AT24 attached to the PCI
* config registers)
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <asm/io.h>
#include <asm/irq.h>
#include "tms380tr.h"
#include "abyss.h" /* Madge-specific constants */
static char version[] __devinitdata =
"abyss.c: v1.02 23/11/2000 by Adam Fritzler\n";
#define ABYSS_IO_EXTENT 64
static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, abyss_pci_tbl);
MODULE_LICENSE("GPL");
static int abyss_open(struct net_device *dev);
static int abyss_close(struct net_device *dev);
static void abyss_enable(struct net_device *dev);
static int abyss_chipset_init(struct net_device *dev);
static void abyss_read_eeprom(struct net_device *dev);
static unsigned short abyss_setnselout_pins(struct net_device *dev);
static void at24_writedatabyte(unsigned long regaddr, unsigned char byte);
static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr);
static int at24_sendcmd(unsigned long regaddr, unsigned char cmd);
static unsigned char at24_readdatabit(unsigned long regaddr);
static unsigned char at24_readdatabyte(unsigned long regaddr);
static int at24_waitforack(unsigned long regaddr);
static int at24_waitfornack(unsigned long regaddr);
static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data);
static void at24_start(unsigned long regaddr);
static unsigned char at24_readb(unsigned long regaddr, unsigned char addr);
/*
 * SIF register accessors for the generic TMS380 core; the SIF
 * registers are reached with plain port I/O relative to base_addr.
 */
static unsigned short abyss_sifreadb(struct net_device *dev, unsigned short reg)
{
	return inb(dev->base_addr + reg);
}

static unsigned short abyss_sifreadw(struct net_device *dev, unsigned short reg)
{
	return inw(dev->base_addr + reg);
}

static void abyss_sifwriteb(struct net_device *dev, unsigned short val, unsigned short reg)
{
	outb(val, dev->base_addr + reg);
}

static void abyss_sifwritew(struct net_device *dev, unsigned short val, unsigned short reg)
{
	outw(val, dev->base_addr + reg);
}

/* Filled in at module init from tms380tr_netdev_ops, with open/stop overridden. */
static struct net_device_ops abyss_netdev_ops;
/*
 * PCI probe: claim the IO region and shared IRQ, read the station
 * address from the SEEPROM and register the token ring net device.
 * On any failure, unwinds everything acquired up to that point.
 */
static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int versionprinted;
	struct net_device *dev;
	struct net_local *tp;
	int ret, pci_irq_line;
	unsigned long pci_ioaddr;

	if (versionprinted++ == 0)
		printk("%s", version);

	if (pci_enable_device(pdev))
		return -EIO;

	/* Remove I/O space marker in bit 0. */
	pci_irq_line = pdev->irq;
	pci_ioaddr = pci_resource_start (pdev, 0);

	/* At this point we have found a valid card. */
	dev = alloc_trdev(sizeof(struct net_local));
	if (!dev)
		return -ENOMEM;

	if (!request_region(pci_ioaddr, ABYSS_IO_EXTENT, dev->name)) {
		ret = -EBUSY;
		goto err_out_trdev;
	}

	ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		goto err_out_region;

	dev->base_addr = pci_ioaddr;
	dev->irq = pci_irq_line;

	printk("%s: Madge Smart 16/4 PCI Mk2 (Abyss)\n", dev->name);
	printk("%s: IO: %#4lx IRQ: %d\n",
	       dev->name, pci_ioaddr, dev->irq);
	/*
	 * The TMS SIF registers lay 0x10 above the card base address.
	 */
	dev->base_addr += 0x10;

	ret = tmsdev_init(dev, &pdev->dev);
	if (ret) {
		printk("%s: unable to get memory for dev->priv.\n",
		       dev->name);
		goto err_out_irq;
	}

	abyss_read_eeprom(dev);

	printk("%s: Ring Station Address: %pM\n", dev->name, dev->dev_addr);

	/* hook the Madge-specific accessors into the generic core */
	tp = netdev_priv(dev);
	tp->setnselout = abyss_setnselout_pins;
	tp->sifreadb = abyss_sifreadb;
	tp->sifreadw = abyss_sifreadw;
	tp->sifwriteb = abyss_sifwriteb;
	tp->sifwritew = abyss_sifwritew;

	memcpy(tp->ProductID, "Madge PCI 16/4 Mk2", PROD_ID_SIZE + 1);

	dev->netdev_ops = &abyss_netdev_ops;

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto err_out_tmsdev;
	return 0;

err_out_tmsdev:
	pci_set_drvdata(pdev, NULL);
	tmsdev_term(dev);
err_out_irq:
	free_irq(pdev->irq, dev);
err_out_region:
	release_region(pci_ioaddr, ABYSS_IO_EXTENT);
err_out_trdev:
	free_netdev(dev);
	return ret;
}
/*
 * Compute the NSELOUT pin value for the configured ring speed:
 * bit 0 set selects 4Mbps, clear selects 16Mbps.
 */
static unsigned short abyss_setnselout_pins(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);

	return (tp->DataRate == SPEED_4) ? 0x01 : 0x00;
}
/*
* The following Madge boards should use this code:
* - Smart 16/4 PCI Mk2 (Abyss)
* - Smart 16/4 PCI Mk1 (PCI T)
* - Smart 16/4 Client Plus PnP (Big Apple)
* - Smart 16/4 Cardbus Mk2
*
* These access an Atmel AT24 SEEPROM using their glue chip registers.
*
*/
/* Clock one byte out to the AT24 SEEPROM, most significant bit first. */
static void at24_writedatabyte(unsigned long regaddr, unsigned char byte)
{
	int bit;

	for (bit = 7; bit >= 0; bit--) {
		unsigned char d = (byte >> bit) & 0x01;

		/* clock low, clock high, clock low with data held steady */
		at24_setlines(regaddr, 0, d);
		at24_setlines(regaddr, 1, d);
		at24_setlines(regaddr, 0, d);
	}
}
/*
 * Send a command byte followed by an address byte to the SEEPROM.
 * Returns nonzero on success (both bytes acknowledged), 0 otherwise.
 */
static int at24_sendfullcmd(unsigned long regaddr, unsigned char cmd, unsigned char addr)
{
	if (!at24_sendcmd(regaddr, cmd))
		return 0;

	at24_writedatabyte(regaddr, addr);
	return at24_waitforack(regaddr);
}
/*
 * Issue a start condition and a command byte, retrying up to 10 times
 * until the SEEPROM acknowledges.  Returns nonzero on success.
 */
static int at24_sendcmd(unsigned long regaddr, unsigned char cmd)
{
	int attempt;

	for (attempt = 0; attempt < 10; attempt++) {
		at24_start(regaddr);
		at24_writedatabyte(regaddr, cmd);
		if (at24_waitforack(regaddr))
			return 1;
	}
	return 0;
}
/*
 * Clock one bit in from the SEEPROM: raise the clock with the data
 * line released, sample the data pin, then drop the clock again.
 * The ordering of the at24_setlines() calls is significant.
 */
static unsigned char at24_readdatabit(unsigned long regaddr)
{
	unsigned char val;

	at24_setlines(regaddr, 0, 1);
	at24_setlines(regaddr, 1, 1);
	val = (inb(regaddr) & AT24_DATA) ? 1 : 0;	/* sample while clock high */
	at24_setlines(regaddr, 1, 1);
	at24_setlines(regaddr, 0, 1);
	return val;
}
/* Clock one byte in from the SEEPROM, MSB first. */
static unsigned char at24_readdatabyte(unsigned long regaddr)
{
	unsigned char value = 0;
	int bit;

	for (bit = 0; bit < 8; bit++)
		value = (value << 1) | at24_readdatabit(regaddr);

	return value;
}
/* Poll up to 10 bit-times for an ACK (data line low); nonzero on ACK. */
static int at24_waitforack(unsigned long regaddr)
{
	int tries;

	for (tries = 0; tries < 10; tries++) {
		if (!(at24_readdatabit(regaddr) & 0x01))
			return 1;
	}
	return 0;
}
/* Poll up to 10 bit-times for a NACK (data line high); nonzero on NACK. */
static int at24_waitfornack(unsigned long regaddr)
{
	int tries;

	for (tries = 0; tries < 10; tries++) {
		if (at24_readdatabit(regaddr) & 0x01)
			return 1;
	}
	return 0;
}
/*
 * Drive the SEEPROM clock and data lines through the glue chip
 * register, keeping the enable bit asserted, and let them settle.
 */
static void at24_setlines(unsigned long regaddr, unsigned char clock, unsigned char data)
{
	unsigned char bits = AT24_ENABLE;

	bits |= clock ? AT24_CLOCK : 0;
	bits |= data ? AT24_DATA : 0;

	outb(bits, regaddr);
	tms380tr_wait(20); /* Very necessary. */
}
/*
 * Generate a start condition: the data line falls while the clock is
 * held high (third call).  Call order matters.
 */
static void at24_start(unsigned long regaddr)
{
	at24_setlines(regaddr, 0, 1);
	at24_setlines(regaddr, 1, 1);
	at24_setlines(regaddr, 1, 0);	/* data falls with clock high: START */
	at24_setlines(regaddr, 0, 1);
}
/*
 * Read one byte at ADDR from the SEEPROM: set the address with a
 * write command, then issue a read and fetch the data byte.
 * Returns 0xff on any protocol failure.
 */
static unsigned char at24_readb(unsigned long regaddr, unsigned char addr)
{
	unsigned char value = 0xff;

	if (!at24_sendfullcmd(regaddr, AT24_WRITE, addr))
		return value;
	if (!at24_sendcmd(regaddr, AT24_READ))
		return value;

	value = at24_readdatabyte(regaddr);
	if (!at24_waitfornack(regaddr))
		value = 0xff;

	return value;
}
/*
* Enable basic functions of the Madge chipset needed
* for initialization.
*/
/*
 * Bring the Madge glue chip far enough out of reset for SEEPROM
 * access: set CHIP_NRES in the reset register and let it settle.
 */
static void abyss_enable(struct net_device *dev)
{
	unsigned char reset_reg;
	unsigned long ioaddr;

	ioaddr = dev->base_addr;
	reset_reg = inb(ioaddr + PCIBM2_RESET_REG);
	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);
	tms380tr_wait(100);
}
/*
* Enable the functions of the Madge chipset needed for
* full working order.
*/
/*
 * Full chipset bring-up: pulse the chip/FIFO/SIF reset lines, release
 * them one by one, enable SIF and PCI-error interrupts and program
 * the FIFO threshold.  The ordering of the outb()s is significant.
 */
static int abyss_chipset_init(struct net_device *dev)
{
	unsigned char reset_reg;
	unsigned long ioaddr;

	ioaddr = dev->base_addr;

	reset_reg = inb(ioaddr + PCIBM2_RESET_REG);

	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	/* assert all three resets simultaneously */
	reset_reg &= ~(PCIBM2_RESET_REG_CHIP_NRES |
		       PCIBM2_RESET_REG_FIFO_NRES |
		       PCIBM2_RESET_REG_SIF_NRES);
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	tms380tr_wait(100);

	/* then release them in order: chip, SIF, FIFO */
	reset_reg |= PCIBM2_RESET_REG_CHIP_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	reset_reg |= PCIBM2_RESET_REG_SIF_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	reset_reg |= PCIBM2_RESET_REG_FIFO_NRES;
	outb(reset_reg, ioaddr + PCIBM2_RESET_REG);

	outb(PCIBM2_INT_CONTROL_REG_SINTEN |
	     PCIBM2_INT_CONTROL_REG_PCI_ERR_ENABLE,
	     ioaddr + PCIBM2_INT_CONTROL_REG);

	outb(30, ioaddr + PCIBM2_FIFO_THRESHOLD);

	return 0;
}

/* Put the whole glue chip back into reset. */
static inline void abyss_chipset_close(struct net_device *dev)
{
	unsigned long ioaddr;

	ioaddr = dev->base_addr;
	outb(0, ioaddr + PCIBM2_RESET_REG);
}
/*
* Read configuration data from the AT24 SEEPROM on Madge cards.
*
*/
/*
 * Read configuration data from the AT24 SEEPROM on Madge cards:
 * ring speed, adapter RAM size and the burned-in station address.
 */
static void abyss_read_eeprom(struct net_device *dev)
{
	struct net_local *tp;
	unsigned long ioaddr;
	unsigned short val;
	int i;

	tp = netdev_priv(dev);
	ioaddr = dev->base_addr;

	/* Must enable glue chip first */
	abyss_enable(dev);

	val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
			 PCIBM2_SEEPROM_RING_SPEED);
	tp->DataRate = val ? SPEED_4 : SPEED_16; /* set open speed */
	printk("%s: SEEPROM: ring speed: %dMb/sec\n", dev->name, tp->DataRate);

	/* RAM size is stored in units of 128kb */
	val = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
			 PCIBM2_SEEPROM_RAM_SIZE) * 128;
	printk("%s: SEEPROM: adapter RAM: %dkb\n", dev->name, val);

	dev->addr_len = 6;
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = at24_readb(ioaddr + PCIBM2_SEEPROM_REG,
					      PCIBM2_SEEPROM_BIA+i);
}
/*
 * Net-device open callback: bring the glue chip out of reset and
 * start the TMS380 core.
 *
 * Fix: the original ignored the return value of tms380tr_open(), so a
 * failed open was reported to the network core as success and the
 * chipset was left enabled.  Propagate the error and undo the chipset
 * init on failure.
 */
static int abyss_open(struct net_device *dev)
{
	int err;

	abyss_chipset_init(dev);
	err = tms380tr_open(dev);
	if (err)
		abyss_chipset_close(dev);
	return err;
}
/* Net-device stop callback: close the TMS380 core, then put the glue chip into reset. */
static int abyss_close(struct net_device *dev)
{
	tms380tr_close(dev);
	abyss_chipset_close(dev);
	return 0;
}
/* PCI remove: unregister the net device and release all resources. */
static void __devexit abyss_detach (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	BUG_ON(!dev);
	unregister_netdev(dev);
	/* base_addr was advanced 0x10 past the region start in attach */
	release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
	free_irq(dev->irq, dev);
	tmsdev_term(dev);
	free_netdev(dev);
	pci_set_drvdata(pdev, NULL);
}
static struct pci_driver abyss_driver = {
	.name		= "abyss",
	.id_table	= abyss_pci_tbl,
	.probe		= abyss_attach,
	.remove		= __devexit_p(abyss_detach),
};

/*
 * Module init: clone the generic tms380tr netdev ops, override
 * open/stop with the Abyss-specific versions, then register the
 * PCI driver.
 */
static int __init abyss_init (void)
{
	abyss_netdev_ops = tms380tr_netdev_ops;

	abyss_netdev_ops.ndo_open = abyss_open;
	abyss_netdev_ops.ndo_stop = abyss_close;

	return pci_register_driver(&abyss_driver);
}

static void __exit abyss_rmmod (void)
{
	pci_unregister_driver (&abyss_driver);
}

module_init(abyss_init);
module_exit(abyss_rmmod);
| gpl-2.0 |
KINGbabasula/android_kernel_oneplus_msm8974-kexec | drivers/gpu/drm/udl/udl_main.c | 5272 | 7965 | /*
* Copyright (C) 2012 Red Hat
*
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include "drmP.h"
#include "udl_drv.h"
/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
#define GET_URB_TIMEOUT HZ
#define FREE_URB_TIMEOUT (HZ*2)
/*
 * Read and parse the DisplayLink vendor-specific USB descriptor
 * (type 0x5f) to extract device limits such as the maximum pixel area.
 *
 * Fixes over the original:
 *  - total_len was a u8, silently truncating the int returned by
 *    usb_get_descriptor() (which may be a negative errno);
 *  - the key/length/value walk could read past the end of the
 *    received descriptor; each read is now bounds-checked.
 *
 * Returns nonzero unless the buffer allocation fails; an unrecognized
 * descriptor still returns success, matching the original
 * "allow the driver to load anyway" policy.
 */
static int udl_parse_vendor_descriptor(struct drm_device *dev,
				       struct usb_device *usbdev)
{
	struct udl_device *udl = dev->dev_private;
	char *desc;
	char *buf;
	char *desc_end;
	int total_len;	/* int: usb_get_descriptor() returns negative errno on failure */

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \
			"%02x %02x %02x %02x %02x %02x %02x\n",
			total_len, desc[0],
			desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
			desc[7], desc[8], desc[9], desc[10]);

		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
		    (desc[2] != 0x01) ||   /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			/* need 2 bytes of key plus 1 byte of length */
			if (desc_end - desc < 3)
				break;

			key = *((u16 *) desc);
			desc += sizeof(u16);
			length = *desc;
			desc++;

			/* value must lie entirely within the descriptor */
			if (desc_end - desc < length)
				break;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				if (length < sizeof(u32))
					break;
				max_area = le32_to_cpu(*((u32 *)desc));
				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					  max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}
/*
 * Deferred release of one URB-pool slot; scheduled instead of calling
 * up() directly when that could deadlock (see the #if 0 note in
 * udl_urb_completion below, currently compiled out).
 */
static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}
/*
 * Completion handler for bulk render URBs: log unexpected errors,
 * return the urb_node to the free list and release one slot on the
 * in-flight limiting semaphore.
 */
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				  __func__, urb->status);
			atomic_set(&udl->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
		up(&udl->urbs.limit_sem);
}
/*
 * Wait for all outstanding render URBs to complete and free the whole
 * pool: the urb, its coherent DMA buffer and the urb_node for each.
 */
static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {
		/* Getting interrupted means a leak, but ok at shutdown*/
		ret = down_interruptible(&udl->urbs.limit_sem);
		if (ret)
			break;

		spin_lock_irqsave(&udl->urbs.lock, flags);

		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&udl->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}
/*
 * Allocate a pool of COUNT bulk URBs with coherent DMA buffers, chain
 * them on udl->urbs.list and size the limiting semaphore accordingly.
 * Returns the number actually allocated (may stop early on OOM).
 *
 * NOTE(review): buffers are allocated MAX_TRANSFER bytes but the urb
 * is filled with `size`; fine while size <= MAX_TRANSFER, as in the
 * call from udl_driver_load() — confirm if other callers appear.
 */
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = dev->dev_private;
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&udl->urbs.lock);

	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udl->ddev->usbdev, usb_sndbulkpipe(udl->ddev->usbdev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		i++;
	}

	sema_init(&udl->urbs.limit_sem, i);
	udl->urbs.count = i;
	udl->urbs.available = i;

	DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);

	return i;
}
/*
 * Take one free URB from the pool, waiting up to GET_URB_TIMEOUT for
 * an in-flight one to complete.  Returns NULL on timeout or signal
 * (and flags lost_pixels).
 */
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&udl->lost_pixels, 1);
		DRM_INFO("wait for urb interrupted: %x available: %d\n",
			 ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irqsave(&udl->urbs.lock, flags);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irqrestore(&udl->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}
/*
 * Submit a render URB carrying LEN bytes of payload.  On failure the
 * URB is returned to the pool immediately and lost_pixels is flagged.
 */
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = dev->dev_private;
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		atomic_set(&udl->lost_pixels, 1);
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}
int udl_driver_load(struct drm_device *dev, unsigned long flags)
{
struct udl_device *udl;
int ret;
DRM_DEBUG("\n");
udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
if (!udl)
return -ENOMEM;
udl->ddev = dev;
dev->dev_private = udl;
if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
DRM_ERROR("firmware not recognized. Assume incompatible device\n");
goto err;
}
if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
ret = -ENOMEM;
DRM_ERROR("udl_alloc_urb_list failed\n");
goto err;
}
DRM_DEBUG("\n");
ret = udl_modeset_init(dev);
ret = udl_fbdev_init(dev);
return 0;
err:
kfree(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
/* Called when the USB device goes away: drop the whole URB pool. */
int udl_drop_usb(struct drm_device *dev)
{
	udl_free_urb_list(dev);
	return 0;
}

/* DRM unload callback: undo everything udl_driver_load() set up. */
int udl_driver_unload(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;

	if (udl->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
	udl_modeset_cleanup(dev);
	kfree(udl);
	return 0;
}
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_exynos5410 | arch/ia64/kvm/vcpu.c | 11672 | 51799 | /*
* kvm_vcpu.c: handling all virtual cpu related thing.
* Copyright (c) 2005, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Shaofan Li (Susue Li) <susie.li@intel.com>
* Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
* Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
* Xiantao Zhang <xiantao.zhang@intel.com>
*/
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include "asm-offsets.h"
#include "vcpu.h"
/*
* Special notes:
* - Index by it/dt/rt sequence
* - Only existing mode transitions are allowed in this table
* - RSE is placed at lazy mode when emulating guest partial mode
* - If gva happens to be rr0 and rr4, only allowed case is identity
* mapping (gva=gpa), or panic! (How?)
*/
/*
 * Indexed as mm_switch_table[MODE_IND(old_psr)][MODE_IND(new_psr)],
 * where MODE_IND packs the (it,dt,rt) translation-enable bits.
 * Zero entries are transitions that are not expected to occur.
 */
int mm_switch_table[8][8] = {
	/* 2004/09/12(Kevin): Allow switch to self */
	/*
	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
	 *  This kind of transition usually occurs in the very early
	 *  stage of Linux boot up procedure. Another case is in efi
	 *  and pal calls. (see "arch/ia64/kernel/head.S")
	 *
	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
	 *  This kind of transition is found when OSYa exits efi boot
	 *  service. Due to gva = gpa in this case (Same region),
	 *  data access can be satisfied though itlb entry for physical
	 *  emulation is hit.
	 */
	{SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
	 *  This kind of transition is found in OSYa.
	 *
	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
	 *  This kind of transition is found in OSYa
	 */
	{SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
	/* (1,0,0)->(1,1,1) */
	{0,  0,  0,  0,  0,  0,  0,  SW_P2V},
	/*
	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
	 *  This kind of transition usually occurs when Linux returns
	 *  from the low level TLB miss handlers.
	 *  (see "arch/ia64/kernel/ivt.S")
	 */
	{0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
	{0,  0,  0,  0,  0,  0,  0,  0},
	/*
	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
	 *  This kind of transition usually occurs in Linux low level
	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
	 *
	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
	 *  This kind of transition usually occurs in pal and efi calls,
	 *  which requires running in physical mode.
	 *  (see "arch/ia64/kernel/head.S")
	 *  (1,1,1)->(1,0,0)
	 */
	{SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
};
/* Start the vcpu in guest physical addressing mode. */
void physical_mode_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mode_flags = GUEST_IN_PHY;
}

/*
 * Load the metaphysical rids into rr0/rr4; interrupt collection is
 * disabled (ia64_clear_ic) around the region register writes.
 */
void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	/* Save original virtual mode rr[0] and rr[4] */
	psr = ia64_clear_ic();
	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
	ia64_srlz_d();

	ia64_set_psr(psr);
	return;
}

/* Restore the saved virtual-mode rids into rr0/rr4. */
void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
	ia64_srlz_d();
	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
	ia64_srlz_d();
	ia64_set_psr(psr);
	return;
}
/* Look up the required action for an old->new psr mode transition. */
static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
{
	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

/*
 * Perform the region-register switch required when the guest changes
 * its translation mode (physical <-> virtual), tracking the current
 * mode in vcpu->arch.mode_flags.
 */
void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
		    struct ia64_psr new_psr)
{
	int act;
	act = mm_switch_action(old_psr, new_psr);
	switch (act) {
	case SW_V2P:
		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
		  old_psr.val, new_psr.val);*/
		switch_to_physical_rid(vcpu);
		/*
		 * Set rse to enforced lazy, to prevent active rse
		 * save/restore when guest physical mode.
		 */
		vcpu->arch.mode_flags |= GUEST_IN_PHY;
		break;
	case SW_P2V:
		switch_to_virtual_rid(vcpu);
		/*
		 * recover old mode which is saved when entering
		 * guest physical mode
		 */
		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
		break;
	case SW_SELF:
		break;	/* same mode: nothing to do */
	case SW_NOP:
		break;	/* transition needs no rid switch */
	default:
		/* Sanity check */
		break;
	}
	return;
}
/*
* In physical mode, insert tc/tr for region 0 and 4 uses
* RID[0] and RID[4] which is for physical mode emulation.
* However what those inserted tc/tr wants is rid for
* virtual mode. So original virtual rid needs to be restored
* before insert.
*
* Operations which required such switch include:
* - insertions (itc.*, itr.*)
* - purges (ptc.* and ptr.*)
* - tpa
* - tak
* - thash?, ttag?
* All above needs actual virtual rid for destination entry.
*/
/*
 * Switch the memory-management mode only if any of the translation
 * bits (it, dt, rt) actually changed between the two psr values.
 */
void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
		struct ia64_psr new_psr)
{
	int translation_bits_changed;

	translation_bits_changed = (old_psr.dt != new_psr.dt) ||
				   (old_psr.it != new_psr.it) ||
				   (old_psr.rt != new_psr.rt);

	if (translation_bits_changed)
		switch_mm_mode(vcpu, old_psr, new_psr);
}
/*
* In physical mode, insert tc/tr for region 0 and 4 uses
* RID[0] and RID[4] which is for physical mode emulation.
* However what those inserted tc/tr wants is rid for
* virtual mode. So original virtual rid needs to be restored
* before insert.
*
* Operations which required such switch include:
* - insertions (itc.*, itr.*)
* - purges (ptc.* and ptr.*)
* - tpa
* - tak
* - thash?, ttag?
* All above needs actual virtual rid for destination entry.
*/
/*
 * If the guest is in physical mode, temporarily restore the virtual
 * rids so a following insert/purge/tpa/tak uses the real virtual rid;
 * recover_if_physical_mode() undoes this afterwards.
 */
void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu)) {
		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
		switch_to_virtual_rid(vcpu);
	}
	return;
}

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
{
	if (is_physical_mode(vcpu))
		switch_to_physical_rid(vcpu);
	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
	return;
}
/* Byte offset of static register field x within struct kvm_pt_regs. */
#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)

/* Offsets for r1-r31; slot 0 (r0) is invalid and must never be used. */
static u16 gr_info[32] = {
	0, /* r0 is read-only : WE SHOULD NEVER GET THIS */
	RPT(r1), RPT(r2), RPT(r3),
	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};

#define IA64_FIRST_STACKED_GR 32
#define IA64_FIRST_ROTATING_FR 32
/*
 * Apply the register rename: index REG within a rotating region of
 * size SOR with rename base RRB, wrapping around at SOR.
 */
static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
	unsigned long idx = reg + rrb;

	return (idx >= sor) ? idx - sor : idx;
}
/*
* Return the (rotated) index for floating point register
* be in the REGNUM (REGNUM must range from 32-127,
* result is in the range from 0-95.
*/
/*
 * Map floating point register number REGNUM (32-127) to its rotated
 * index in the range 0-95, using the rrb.fr field of cr.ifs.
 */
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
		long regnum)
{
	unsigned long rrb = (regs->cr_ifs >> 25) & 0x7f;	/* rrb.fr */

	return rotate_reg(96, rrb, regnum - IA64_FIRST_ROTATING_FR);
}
/*
* The inverse of the above: given bspstore and the number of
* registers, calculate ar.bsp.
*/
/*
 * Step NUM_REGS register slots forward (or backward, if negative)
 * from ADDR in the RSE backing store, accounting for the NaT
 * collection slot that follows each group of 63 register slots
 * (0x3f slots per group including the NaT slot); i accumulates the
 * number of extra NaT slots crossed.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
								long num_regs)
{
	long delta = ia64_rse_slot_num(addr) + num_regs;
	int i = 0;

	if (num_regs < 0)
		delta -= 0x3e;
	if (delta < 0) {
		while (delta <= -0x3f) {
			i--;
			delta += 0x3f;
		}
	} else {
		while (delta >= 0x3f) {
			i++;
			delta -= 0x3f;
		}
	}
	return addr + num_regs + i;
}
/*
 * Read stacked register r1 (>= r32) of the guest from the VMM RSE
 * backing store: value in *val and, when nat != NULL, its NaT bit in
 * *nat.  ar.rsc is put into lazy mode while walking the store.
 *
 * NOTE(review): ar.rsc is only restored when nat != NULL — looks
 * unintentional; confirm that all callers pass a non-NULL nat.
 */
static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
		unsigned long *val, int *nat)
{
	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc;
	long sof = (regs->cr_ifs) & 0x7f;	/* size of frame */
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);	/* size of rotating portion */
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;	/* gr rename base */
	long ridx = r1 - 32;

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	new_rsc = old_rsc&(~(0x3));	/* clear mode bits -> lazy */
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	bsp = kbs + (regs->loadrs >> 19);

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	if (addr >= bspstore) {
		/* target slot not yet spilled: flush dirty registers out */
		ia64_flushrs();
		ia64_mf();
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	}
	*val = *addr;
	if (nat) {
		if (bspstore < rnat_addr)
			*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
					& nat_mask);
		else
			*nat = (int)!!((*rnat_addr) & nat_mask);
		ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
	}
}
/*
 * Write stacked general register r1 (>= 32) in the VMM RSE backing
 * store and clear its NaT bit.
 *
 * NOTE(review): the 'nat' argument is currently ignored; the NaT bit is
 * always cleared — confirm whether NaT propagation is intended here.
 */
void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
				unsigned long val, unsigned long nat)
{
	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
	unsigned long nat_mask;
	unsigned long old_rsc, new_rsc, psr;
	unsigned long rnat;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	/* Apply GR rotation for registers inside the rotating region. */
	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
	/* put RSC to lazy mode, and set loadrs 0 */
	new_rsc = old_rsc & (~0x3fff0003);
	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */

	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
	nat_mask = 1UL << ia64_rse_slot_num(addr);
	rnat_addr = ia64_rse_rnat_addr(addr);

	local_irq_save(psr);
	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
	if (addr >= bspstore) {
		/* Target still in registers: flush it to memory first. */
		ia64_flushrs();
		ia64_mf();
		*addr = val;
		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		/* Clear the NaT bit wherever it currently lives. */
		if (bspstore < rnat_addr)
			rnat = rnat & (~nat_mask);
		else
			*rnat_addr = (*rnat_addr)&(~nat_mask);

		ia64_mf();
		/* Reload the flushed registers from the backing store. */
		ia64_loadrs();
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	} else {
		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
		*addr = val;
		if (bspstore < rnat_addr)
			rnat = rnat&(~nat_mask);
		else
			*rnat_addr = (*rnat_addr) & (~nat_mask);
		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
	}
	local_irq_restore(psr);
	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}
void getreg(unsigned long regnum, unsigned long *val,
int *nat, struct kvm_pt_regs *regs)
{
unsigned long addr, *unat;
if (regnum >= IA64_FIRST_STACKED_GR) {
get_rse_reg(regs, regnum, val, nat);
return;
}
/*
* Now look at registers in [0-31] range and init correct UNAT
*/
addr = (unsigned long)regs;
unat = ®s->eml_unat;
addr += gr_info[regnum];
*val = *(unsigned long *)addr;
/*
* do it only when requested
*/
if (nat)
*nat = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}
void setreg(unsigned long regnum, unsigned long val,
int nat, struct kvm_pt_regs *regs)
{
unsigned long addr;
unsigned long bitmask;
unsigned long *unat;
/*
* First takes care of stacked registers
*/
if (regnum >= IA64_FIRST_STACKED_GR) {
set_rse_reg(regs, regnum, val, nat);
return;
}
/*
* Now look at registers in [0-31] range and init correct UNAT
*/
addr = (unsigned long)regs;
unat = ®s->eml_unat;
/*
* add offset from base of struct
* and do it !
*/
addr += gr_info[regnum];
*(unsigned long *)addr = val;
/*
* We need to clear the corresponding UNAT bit to fully emulate the load
* UNAT bit_pos = GR[r3]{8:3} form EAS-2.4
*/
bitmask = 1UL << ((addr >> 3) & 0x3f);
if (nat)
*unat |= bitmask;
else
*unat &= ~bitmask;
}
/* Read guest general register 'reg'; GR0 always reads as zero. */
u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	unsigned long v = 0;

	if (reg)
		getreg(reg, &v, 0, vcpu_regs(vcpu));
	return v;
}
/* Write guest general register 'reg'; GR0 and out-of-frame writes are ignored. */
void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	long sof = regs->cr_ifs & 0x7f;

	if (reg == 0 || reg >= sof + 32)
		return;
	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later*/
}
/*
 * Spill guest floating point register 'regnum' into *fpval.
 * The 128-way switch is required because ia64_stf_spill() needs the
 * FP register number as a compile-time immediate.
 */
void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
		struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_stf_spill(fpval, reg);	\
	break

	switch (regnum) {
		CASE_FIXED_FP(0);
		CASE_FIXED_FP(1);
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);

		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);

		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
#undef CASE_FIXED_FP
}
/*
 * Fill guest floating point register 'regnum' from *fpval.
 * Cases start at f2: f0 and f1 are architecturally fixed (0.0 and 1.0)
 * and are never written.  The switch is required because
 * ia64_ldf_fill() needs the register number as a compile-time
 * immediate.
 */
void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
			struct kvm_pt_regs *regs)
{
	/* Take floating register rotation into consideration*/
	if (regnum >= IA64_FIRST_ROTATING_FR)
		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)			\
	case (reg) :				\
		ia64_ldf_fill(reg, fpval);	\
	break

	switch (regnum) {
		CASE_FIXED_FP(2);
		CASE_FIXED_FP(3);
		CASE_FIXED_FP(4);
		CASE_FIXED_FP(5);

		CASE_FIXED_FP(6);
		CASE_FIXED_FP(7);
		CASE_FIXED_FP(8);
		CASE_FIXED_FP(9);
		CASE_FIXED_FP(10);
		CASE_FIXED_FP(11);

		CASE_FIXED_FP(12);
		CASE_FIXED_FP(13);
		CASE_FIXED_FP(14);
		CASE_FIXED_FP(15);
		CASE_FIXED_FP(16);
		CASE_FIXED_FP(17);
		CASE_FIXED_FP(18);
		CASE_FIXED_FP(19);
		CASE_FIXED_FP(20);
		CASE_FIXED_FP(21);
		CASE_FIXED_FP(22);
		CASE_FIXED_FP(23);
		CASE_FIXED_FP(24);
		CASE_FIXED_FP(25);
		CASE_FIXED_FP(26);
		CASE_FIXED_FP(27);
		CASE_FIXED_FP(28);
		CASE_FIXED_FP(29);
		CASE_FIXED_FP(30);
		CASE_FIXED_FP(31);
		CASE_FIXED_FP(32);
		CASE_FIXED_FP(33);
		CASE_FIXED_FP(34);
		CASE_FIXED_FP(35);
		CASE_FIXED_FP(36);
		CASE_FIXED_FP(37);
		CASE_FIXED_FP(38);
		CASE_FIXED_FP(39);
		CASE_FIXED_FP(40);
		CASE_FIXED_FP(41);
		CASE_FIXED_FP(42);
		CASE_FIXED_FP(43);
		CASE_FIXED_FP(44);
		CASE_FIXED_FP(45);
		CASE_FIXED_FP(46);
		CASE_FIXED_FP(47);
		CASE_FIXED_FP(48);
		CASE_FIXED_FP(49);
		CASE_FIXED_FP(50);
		CASE_FIXED_FP(51);
		CASE_FIXED_FP(52);
		CASE_FIXED_FP(53);
		CASE_FIXED_FP(54);
		CASE_FIXED_FP(55);
		CASE_FIXED_FP(56);
		CASE_FIXED_FP(57);
		CASE_FIXED_FP(58);
		CASE_FIXED_FP(59);
		CASE_FIXED_FP(60);
		CASE_FIXED_FP(61);
		CASE_FIXED_FP(62);
		CASE_FIXED_FP(63);
		CASE_FIXED_FP(64);
		CASE_FIXED_FP(65);
		CASE_FIXED_FP(66);
		CASE_FIXED_FP(67);
		CASE_FIXED_FP(68);
		CASE_FIXED_FP(69);
		CASE_FIXED_FP(70);
		CASE_FIXED_FP(71);
		CASE_FIXED_FP(72);
		CASE_FIXED_FP(73);
		CASE_FIXED_FP(74);
		CASE_FIXED_FP(75);
		CASE_FIXED_FP(76);
		CASE_FIXED_FP(77);
		CASE_FIXED_FP(78);
		CASE_FIXED_FP(79);
		CASE_FIXED_FP(80);
		CASE_FIXED_FP(81);
		CASE_FIXED_FP(82);
		CASE_FIXED_FP(83);
		CASE_FIXED_FP(84);
		CASE_FIXED_FP(85);
		CASE_FIXED_FP(86);
		CASE_FIXED_FP(87);
		CASE_FIXED_FP(88);
		CASE_FIXED_FP(89);
		CASE_FIXED_FP(90);
		CASE_FIXED_FP(91);
		CASE_FIXED_FP(92);
		CASE_FIXED_FP(93);
		CASE_FIXED_FP(94);
		CASE_FIXED_FP(95);
		CASE_FIXED_FP(96);
		CASE_FIXED_FP(97);
		CASE_FIXED_FP(98);
		CASE_FIXED_FP(99);
		CASE_FIXED_FP(100);
		CASE_FIXED_FP(101);
		CASE_FIXED_FP(102);
		CASE_FIXED_FP(103);
		CASE_FIXED_FP(104);
		CASE_FIXED_FP(105);
		CASE_FIXED_FP(106);
		CASE_FIXED_FP(107);
		CASE_FIXED_FP(108);
		CASE_FIXED_FP(109);
		CASE_FIXED_FP(110);
		CASE_FIXED_FP(111);
		CASE_FIXED_FP(112);
		CASE_FIXED_FP(113);
		CASE_FIXED_FP(114);
		CASE_FIXED_FP(115);
		CASE_FIXED_FP(116);
		CASE_FIXED_FP(117);
		CASE_FIXED_FP(118);
		CASE_FIXED_FP(119);
		CASE_FIXED_FP(120);
		CASE_FIXED_FP(121);
		CASE_FIXED_FP(122);
		CASE_FIXED_FP(123);
		CASE_FIXED_FP(124);
		CASE_FIXED_FP(125);
		CASE_FIXED_FP(126);
		CASE_FIXED_FP(127);
	}
}
/* Read guest FP register 'reg' into *val. */
void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		struct ia64_fpreg *val)
{
	getfpreg(reg, val, vcpu_regs(vcpu));	/* FIXME: handle NATs later*/
}
/* Write guest FP register 'reg'; f0 and f1 are fixed, writes ignored. */
void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		struct ia64_fpreg *val)
{
	if (reg < 2)
		return;
	setfpreg(reg, val, vcpu_regs(vcpu));	/* FIXME: handle NATs later*/
}
/*
 * The Altix RTC is mapped specially here for the vmm module
 */
#define SN_RTC_BASE	(u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
/* Read the host time source backing the guest ITC. */
static long kvm_get_itc(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	struct kvm *kvm = (struct kvm *)KVM_VM_BASE;

	/* SN2 systems use the chipset RTC instead of ar.itc. */
	if (kvm->arch.is_sn2)
		return (*SN_RTC_BASE);
	else
#endif
		return ia64_getreg(_IA64_REG_AR_ITC);
}
/************************************************************************
* lsapic timer
***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
unsigned long guest_itc;
guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
if (guest_itc >= VMX(vcpu, last_itc)) {
VMX(vcpu, last_itc) = guest_itc;
return guest_itc;
} else
return VMX(vcpu, last_itc);
}
static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);

/*
 * Write the guest ITC.  The guest value is kept as an offset from the
 * host time source; writes on the BSP propagate the new offset to all
 * online vcpus so guest ITCs stay in step.
 */
static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_vcpu *v;
	struct kvm *kvm;
	int i;
	long itc_offset = val - kvm_get_itc(vcpu);
	unsigned long vitv = VCPU(vcpu, itv);

	kvm = (struct kvm *)KVM_VM_BASE;

	if (kvm_vcpu_is_bsp(vcpu)) {
		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
			/*
			 * vcpu structs are contiguous in VMM space;
			 * assumes the BSP is vcpu 0 — addresses are
			 * computed relative to the current vcpu.
			 */
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			VMX(v, itc_offset) = itc_offset;
			VMX(v, last_itc) = 0;
		}
	}
	VMX(vcpu, last_itc) = 0;
	if (VCPU(vcpu, itm) <= val) {
		/* Timer match is in the past: stop checking, drop pending tick. */
		VMX(vcpu, itc_check) = 0;
		vcpu_unpend_interrupt(vcpu, vitv);
	} else {
		VMX(vcpu, itc_check) = 1;
		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
	}
}
static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
{
return ((u64)VCPU(vcpu, itm));
}
/* Write cr.itm and (re)arm or cancel the ITC match check accordingly. */
static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long timer_vec = VCPU(vcpu, itv);

	VCPU(vcpu, itm) = val;
	if (val <= vcpu_get_itc(vcpu)) {
		/* Match already passed: nothing to wait for. */
		VMX(vcpu, itc_check) = 0;
		return;
	}
	VMX(vcpu, itc_check) = 1;
	vcpu_unpend_interrupt(vcpu, timer_vec);
	VMX(vcpu, timer_pending) = 0;
}
#define ITV_VECTOR(itv) (itv&0xff)
#define ITV_IRQ_MASK(itv) (itv&(1<<16))

/* Write cr.itv; deliver a deferred timer tick once it is unmasked. */
static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itv) = val;
	if (ITV_IRQ_MASK(val) || !vcpu->arch.timer_pending)
		return;
	vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
	vcpu->arch.timer_pending = 0;
}
/* EOI: retire the highest in-service interrupt and re-evaluate pending irqs. */
static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
{
	int vec = highest_inservice_irq(vcpu);

	if (vec == NULL_VECTOR)
		return;
	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
	VCPU(vcpu, eoi) = 0;
	vcpu->arch.irq_new_pending = 1;
}
/* See Table 5-8 in SDM vol2 for the definition */
/*
 * Decide whether pending vector 'h_pending' may interrupt while
 * 'h_inservice' is in service, given the guest TPR.  The checks are
 * ordered by priority class: NMI, then ExtINT, then normal vectors.
 */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
{
	union ia64_tpr vtpr;

	vtpr.val = VCPU(vcpu, tpr);

	/* Nothing preempts an in-service NMI. */
	if (h_inservice == NMI_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == NMI_VECTOR) {
		/* Non Maskable Interrupt */
		return IRQ_NO_MASKED;
	}

	if (h_inservice == ExtINT_VECTOR)
		return IRQ_MASKED_BY_INSVC;

	if (h_pending == ExtINT_VECTOR) {
		if (vtpr.mmi) {
			/* mask all external IRQ */
			return IRQ_MASKED_BY_VTPR;
		} else
			return IRQ_NO_MASKED;
	}

	/* Normal vectors: compare priority against in-service and TPR. */
	if (is_higher_irq(h_pending, h_inservice)) {
		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
			return IRQ_NO_MASKED;
		else
			return IRQ_MASKED_BY_VTPR;
	} else {
		return IRQ_MASKED_BY_INSVC;
	}
}
/* Mark vector 'vec' pending in the guest IRR and flag re-evaluation. */
void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long flags;
	int was_set;

	local_irq_save(flags);
	was_set = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(flags);

	vcpu->arch.irq_new_pending = 1;
}
/* Clear vector 'vec' from the guest IRR; flag re-evaluation if it was set. */
void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
	long flags;
	int was_pending;

	local_irq_save(flags);
	was_pending = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
	local_irq_restore(flags);

	if (!was_pending)
		return;
	vcpu->arch.irq_new_pending = 1;
	wmb();
}
/* Publish the highest-priority pending interrupt class in VPD vhpi. */
void update_vhpi(struct kvm_vcpu *vcpu, int vec)
{
	u64 vhpi;

	if (vec == NULL_VECTOR)
		vhpi = 0;
	else if (vec == NMI_VECTOR)
		vhpi = 32;
	else if (vec == ExtINT_VECTOR)
		vhpi = 16;
	else
		vhpi = vec >> 4;	/* priority class of a normal vector */

	VCPU(vcpu, vhpi) = vhpi;
	/* Let PAL post the pending interrupt when acceleration is enabled. */
	if (VCPU(vcpu, vac).a_int)
		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
}
/*
 * Emulate a read of cr.ivr: return the highest-priority deliverable
 * vector, move it from pending (IRR) to in-service (insvc), or return
 * the spurious vector if everything is masked.
 */
u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
	int vec, h_inservice, mask;

	vec = highest_pending_irq(vcpu);
	h_inservice = highest_inservice_irq(vcpu);
	mask = irq_masked(vcpu, vec, h_inservice);
	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
		if (VCPU(vcpu, vhpi))
			update_vhpi(vcpu, NULL_VECTOR);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	if (mask == IRQ_MASKED_BY_VTPR) {
		/* TPR-masked: remember it in vhpi for later delivery. */
		update_vhpi(vcpu, vec);
		return IA64_SPURIOUS_INT_VECTOR;
	}
	/* Accept the interrupt: pending -> in-service. */
	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
	vcpu_unpend_interrupt(vcpu, vec);
	return (u64)vec;
}
/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
/*
 * Emulate the thash instruction: compute the VHPT entry address for
 * 'vadr'.  Long format (vpta.vf) is delegated to PAL; short format is
 * computed directly from the PTA base and the page-indexed offset.
 */
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;
	u64 pval;
	u64 vhpt_offset;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	/* 8-byte VHPT entry index within the 2^vpta.size table. */
	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
	if (vpta.vf) {
		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
				vpta.val, 0, 0, 0, 0);
	} else {
		/* Region bits | table base (aligned to vpta.size) | offset. */
		pval = (vadr & VRN_MASK) | vhpt_offset |
			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
	}
	return  pval;
}
/*
 * Emulate the ttag instruction.  Long-format VHPT tags come from PAL;
 * the short format has no tag, so 1 (an invalid tag) is returned.
 */
u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
{
	union ia64_pta vpta;
	union ia64_rr vrr;

	vpta.val = vcpu_get_pta(vcpu);
	vrr.val = vcpu_get_rr(vcpu, vadr);
	if (!vpta.vf)
		return 1;
	return ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val, 0, 0, 0, 0, 0);
}
/*
 * Emulate the tak instruction: return the protection key for 'vadr',
 * or 1 (invalid) when no present translation exists or the short VHPT
 * format is in use.
 */
u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
{
	struct thash_data *entry;
	union ia64_pta vpta;

	vpta.val = vcpu_get_pta(vcpu);
	if (vpta.vf == 0)
		return 1;

	entry = vtlb_lookup(vcpu, vadr, D_TLB);
	if (!entry || !entry->p)
		return 1;
	return entry->key;
}
/* Decode and execute a thash instruction: GR[r1] = thash(GR[r3]). */
void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vadr = vcpu_get_gr(vcpu, inst.M46.r3);

	vcpu_set_gr(vcpu, inst.M46.r1, vcpu_thash(vcpu, vadr), 0);
}
/* Decode and execute a ttag instruction: GR[r1] = ttag(GR[r3]). */
void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vadr = vcpu_get_gr(vcpu, inst.M46.r3);

	vcpu_set_gr(vcpu, inst.M46.r1, vcpu_ttag(vcpu, vadr), 0);
}
/*
 * Emulate the tpa instruction: translate guest virtual 'vadr' to the
 * physical address *padr.  Tries the VHPT first, then the VTLB; on a
 * miss, raises the appropriate guest fault (alt-dtlb / dvhpt / nested
 * dtlb depending on vpsr.ic and VHPT enabling) and returns IA64_FAULT.
 */
int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
{
	struct thash_data *data;
	union ia64_isr visr, pt_isr;
	struct kvm_pt_regs *regs;
	struct ia64_psr vpsr;

	regs = vcpu_regs(vcpu);
	pt_isr.val = VMX(vcpu, cr_isr);
	visr.val = 0;
	visr.ei = pt_isr.ei;
	visr.ir = pt_isr.ir;
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
	visr.na = 1;	/* non-access reference (tpa) */

	/* First try the VHPT cache. */
	data = vhpt_lookup(vadr);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else {
			/* VHPT entries carry the guest physical address. */
			*padr = (data->gpaddr >> data->ps << data->ps) |
				(vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}

	/* Fall back to the software VTLB. */
	data = vtlb_lookup(vcpu, vadr, D_TLB);
	if (data) {
		if (data->p == 0) {
			vcpu_set_isr(vcpu, visr.val);
			data_page_not_present(vcpu, vadr);
			return IA64_FAULT;
		} else if (data->ma == VA_MATTR_NATPAGE) {
			vcpu_set_isr(vcpu, visr.val);
			dnat_page_consumption(vcpu, vadr);
			return IA64_FAULT;
		} else{
			/* VTLB entries carry a page frame number. */
			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
				| (vadr & (PSIZE(data->ps) - 1));
			return IA64_NO_FAULT;
		}
	}
	/* No translation: raise the matching guest fault. */
	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			alt_dtlb(vcpu, vadr);
			return IA64_FAULT;
		} else {
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	} else {
		if (vpsr.ic) {
			vcpu_set_isr(vcpu, visr.val);
			dvhpt_fault(vcpu, vadr);
			return IA64_FAULT;
		} else{
			nested_dtlb(vcpu);
			return IA64_FAULT;
		}
	}

	/* Unreachable: every branch above returns. */
	return IA64_NO_FAULT;
}
/* Decode and execute a tpa instruction: GR[r1] = tpa(GR[r3]). */
int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long padr;
	unsigned long vadr = vcpu_get_gr(vcpu, inst.M46.r3);

	if (vcpu_tpa(vcpu, vadr, &padr))
		return IA64_FAULT;
	vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
	return IA64_NO_FAULT;
}
/* Decode and execute a tak instruction: GR[r1] = tak(GR[r3]). */
void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long vadr = vcpu_get_gr(vcpu, inst.M46.r3);

	vcpu_set_gr(vcpu, inst.M46.r1, vcpu_tak(vcpu, vadr), 0);
}
/************************************
 * Insert/Purge translation register/cache
 ************************************/
/* Insert an instruction TC entry (itc.i emulation). */
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}
/* Insert a data TC entry (itc.d emulation). */
void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}
/* Insert instruction translation register 'slot' (itr.i emulation). */
void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 ps, va, rid;
	struct thash_data *p_itr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;	/* strip reserved bits */
	rid = vcpu_get_rr(vcpu, ifa);
	rid = rid & RR_RID_MASK;
	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
	vcpu_set_tr(p_itr, pte, itir, va, rid);
	/* Track which regions have ITR mappings for fast purging. */
	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
}
/* Insert data translation register 'slot' (itr.d emulation). */
void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
	u64 gpfn;
	u64 ps, va, rid;
	struct thash_data *p_dtr;

	ps = itir_ps(itir);
	va = PAGEALIGN(ifa, ps);
	pte &= ~PAGE_FLAGS_RV_MASK;	/* strip reserved bits */

	/* Purge overlapping cached translations first. */
	if (ps != _PAGE_SIZE_16M)
		thash_purge_entries(vcpu, va, ps);
	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
	if (__gpfn_is_io(gpfn))
		pte |= VTLB_PTE_IO;
	rid = vcpu_get_rr(vcpu, va);
	rid = rid & RR_RID_MASK;
	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
	/* Use p_dtr (it was computed but unused; the call recomputed it). */
	vcpu_set_tr(p_dtr, pte, itir, va, rid);
	/* Track which regions have DTR mappings for fast purging. */
	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
}
/* Purge data TRs and TCs overlapping [ifa, ifa + 2^ps) (ptr.d). */
void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	u64 va = PAGEALIGN(ifa, ps);
	int slot;

	while ((slot = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
		vcpu->arch.dtrs[slot].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}
/* Purge instruction TRs and TCs overlapping [ifa, ifa + 2^ps) (ptr.i). */
void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
	u64 va = PAGEALIGN(ifa, ps);
	int slot;

	while ((slot = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
		vcpu->arch.itrs[slot].page_flags = 0;

	thash_purge_entries(vcpu, va, ps);
}
/* Local TC purge (ptc.l emulation). */
void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	thash_purge_entries(vcpu, PAGEALIGN(va, ps), ps);
}
/* Purge-entire (ptc.e emulation): flush all cached translations.
 * 'va' is part of the architectural interface but unused here. */
void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
	thash_purge_all(vcpu);
}
/*
 * Global TC purge (ptc.ga emulation): exit to the host (vmm_transition)
 * so the purge can be broadcast to other vcpus, then purge locally.
 * Runs with interrupts disabled across the transition.
 */
void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{

	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	long psr;

	local_irq_save(psr);
	p->exit_reason = EXIT_REASON_PTC_G;

	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
	p->u.ptc_g_data.vaddr = va;
	p->u.ptc_g_data.ps = ps;
	vmm_transition(vcpu);
	/* Do Local Purge Here*/
	vcpu_ptc_l(vcpu, va, ps);
	local_irq_restore(psr);
}
/* ptc.g is emulated identically to ptc.ga. */
void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
	vcpu_ptc_ga(vcpu, va, ps);
}
/* Decode and execute ptc.e: purge everything for address GR[r3]. */
void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long va = vcpu_get_gr(vcpu, inst.M45.r3);

	vcpu_ptc_e(vcpu, va);
}
/* Decode and execute ptc.g: global purge of GR[r3] with size from GR[r2]. */
void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long va = vcpu_get_gr(vcpu, inst.M45.r3);
	unsigned long itir = vcpu_get_gr(vcpu, inst.M45.r2);

	vcpu_ptc_g(vcpu, va, itir_ps(itir));
}
/* Decode and execute ptc.ga: global purge of GR[r3] with size from GR[r2]. */
void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long va = vcpu_get_gr(vcpu, inst.M45.r3);
	unsigned long itir = vcpu_get_gr(vcpu, inst.M45.r2);

	vcpu_ptc_ga(vcpu, va, itir_ps(itir));
}
/* Decode and execute ptc.l: local purge of GR[r3] with size from GR[r2]. */
void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long va = vcpu_get_gr(vcpu, inst.M45.r3);
	unsigned long itir = vcpu_get_gr(vcpu, inst.M45.r2);

	vcpu_ptc_l(vcpu, va, itir_ps(itir));
}
/* Decode and execute ptr.d: purge data TR for GR[r3], size from GR[r2]. */
void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long va = vcpu_get_gr(vcpu, inst.M45.r3);
	unsigned long itir = vcpu_get_gr(vcpu, inst.M45.r2);

	vcpu_ptr_d(vcpu, va, itir_ps(itir));
}
/* Decode and execute ptr.i: purge instruction TR for GR[r3], size from GR[r2]. */
void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long va = vcpu_get_gr(vcpu, inst.M45.r3);
	unsigned long itir = vcpu_get_gr(vcpu, inst.M45.r2);

	vcpu_ptr_i(vcpu, va, itir_ps(itir));
}
/* Decode and execute itr.d: insert DTR slot GR[r3] with PTE GR[r2]. */
void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long slot = vcpu_get_gr(vcpu, inst.M45.r3);
	unsigned long pte = vcpu_get_gr(vcpu, inst.M45.r2);
	unsigned long itir = vcpu_get_itir(vcpu);
	unsigned long ifa = vcpu_get_ifa(vcpu);

	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}
/* Decode and execute itr.i: insert ITR slot GR[r3] with PTE GR[r2]. */
void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long slot = vcpu_get_gr(vcpu, inst.M45.r3);
	unsigned long pte = vcpu_get_gr(vcpu, inst.M45.r2);
	unsigned long itir = vcpu_get_itir(vcpu);
	unsigned long ifa = vcpu_get_ifa(vcpu);

	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}
/* Decode and execute itc.d: insert a data TC entry with PTE GR[r2]. */
void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir = vcpu_get_itir(vcpu);
	unsigned long ifa = vcpu_get_ifa(vcpu);
	unsigned long pte = vcpu_get_gr(vcpu, inst.M45.r2);

	vcpu_itc_d(vcpu, pte, itir, ifa);
}
/* Decode and execute itc.i: insert an instruction TC entry with PTE GR[r2]. */
void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long itir = vcpu_get_itir(vcpu);
	unsigned long ifa = vcpu_get_ifa(vcpu);
	unsigned long pte = vcpu_get_gr(vcpu, inst.M45.r2);

	vcpu_itc_i(vcpu, pte, itir, ifa);
}
/*************************************
 * Moves to semi-privileged registers
 *************************************/
/* mov ar.itc = imm: the sign bit selects the negated immediate. */
void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm;

	if (inst.M30.s)
		imm = -inst.M30.imm;
	else
		imm = inst.M30.imm;

	vcpu_set_itc(vcpu, imm);
}
/* mov ar.itc = GR[r2]. */
void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	vcpu_set_itc(vcpu, vcpu_get_gr(vcpu, inst.M29.r2));
}
/* mov GR[r1] = ar.itc. */
void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
	vcpu_set_gr(vcpu, inst.M31.r1, vcpu_get_itc(vcpu), 0);
}
/**************************************************************************
 struct kvm_vcpu protection key register access routines
 **************************************************************************/
/* Read protection key register 'reg' directly from hardware. */
unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
	return (unsigned long)ia64_get_pkr(reg);
}
/* Write protection key register 'reg' directly to hardware. */
void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
	ia64_set_pkr(reg, val);
}
/********************************
 * Moves to privileged registers
 ********************************/
/*
 * Emulate mov rr[reg] = val.  The guest value is recorded in vrr[];
 * the machine RR is loaded with the translated (vrrtomrr) value.
 * RR6 is the VMM's own region, so switching it requires a transition
 * to the host; RR0/RR4 are deferred while in metaphysical mode.
 */
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
				unsigned long val)
{
	union ia64_rr oldrr, newrr;
	unsigned long rrval;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	oldrr.val = vcpu_get_rr(vcpu, reg);
	newrr.val = val;
	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;

	switch ((unsigned long)(reg >> VRN_SHIFT)) {
	case VRN6:
		vcpu->arch.vmm_rr = vrrtomrr(val);
		/* Changing RR6 must be done from the host side. */
		local_irq_save(psr);
		p->exit_reason = EXIT_REASON_SWITCH_RR6;
		vmm_transition(vcpu);
		local_irq_restore(psr);
		break;
	case VRN4:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr4 = rrval;
		/* In metaphysical mode the saved value is applied later. */
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	case VRN0:
		rrval = vrrtomrr(val);
		vcpu->arch.metaphysical_saved_rr0 = rrval;
		if (!is_physical_mode(vcpu))
			ia64_set_rr(reg, rrval);
		break;
	default:
		ia64_set_rr(reg, vrrtomrr(val));
		break;
	}

	return (IA64_NO_FAULT);
}
/* Decode mov rr[GR[r3]] = GR[r2]. */
void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long rr_index = vcpu_get_gr(vcpu, inst.M42.r3);
	unsigned long rr_value = vcpu_get_gr(vcpu, inst.M42.r2);

	vcpu_set_rr(vcpu, rr_index, rr_value);
}
/* Intentionally empty: writes to guest data breakpoint registers are
 * ignored (presumably debug registers are not virtualized — confirm). */
void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}
/* Intentionally empty: writes to guest instruction breakpoint registers
 * are ignored (presumably debug registers are not virtualized — confirm). */
void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}
/* Decode mov pmc[GR[r3]] = GR[r2]. */
void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long index = vcpu_get_gr(vcpu, inst.M42.r3);
	unsigned long value = vcpu_get_gr(vcpu, inst.M42.r2);

	vcpu_set_pmc(vcpu, index, value);
}
/* Decode mov pmd[GR[r3]] = GR[r2]. */
void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long index = vcpu_get_gr(vcpu, inst.M42.r3);
	unsigned long value = vcpu_get_gr(vcpu, inst.M42.r2);

	vcpu_set_pmd(vcpu, index, value);
}
/* Decode mov pkr[GR[r3]] = GR[r2]. */
void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	u64 index = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 value = vcpu_get_gr(vcpu, inst.M42.r2);

	vcpu_set_pkr(vcpu, index, value);
}
/* Decode mov GR[r1] = rr[GR[r3]]. */
void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long index = vcpu_get_gr(vcpu, inst.M43.r3);

	vcpu_set_gr(vcpu, inst.M43.r1, vcpu_get_rr(vcpu, index), 0);
}
/* Decode mov GR[r1] = pkr[GR[r3]]. */
void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long index = vcpu_get_gr(vcpu, inst.M43.r3);

	vcpu_set_gr(vcpu, inst.M43.r1, vcpu_get_pkr(vcpu, index), 0);
}
/* Decode mov GR[r1] = dbr[GR[r3]]. */
void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long index = vcpu_get_gr(vcpu, inst.M43.r3);

	vcpu_set_gr(vcpu, inst.M43.r1, vcpu_get_dbr(vcpu, index), 0);
}
/* Decode mov GR[r1] = ibr[GR[r3]]. */
void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long index = vcpu_get_gr(vcpu, inst.M43.r3);

	vcpu_set_gr(vcpu, inst.M43.r1, vcpu_get_ibr(vcpu, index), 0);
}
/* Decode mov GR[r1] = pmc[GR[r3]]. */
void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long index = vcpu_get_gr(vcpu, inst.M43.r3);

	vcpu_set_gr(vcpu, inst.M43.r1, vcpu_get_pmc(vcpu, index), 0);
}
/* Read CPUID register 'reg'; out-of-range indices read as 0. */
unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
	/* FIXME: This could get called as a result of a rsvd-reg fault */
	if (reg > (ia64_get_cpuid(3) & 0xff))
		return 0;
	return ia64_get_cpuid(reg);
}
/* Decode mov GR[r1] = cpuid[GR[r3]]. */
void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long index = vcpu_get_gr(vcpu, inst.M43.r3);

	vcpu_set_gr(vcpu, inst.M43.r1, vcpu_get_cpuid(vcpu, index), 0);
}
/* Write the guest TPR and request interrupt re-evaluation. */
void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
	VCPU(vcpu, tpr) = val;
	vcpu->arch.irq_check = 1;
}
/*
 * Emulate mov cr[cr3] = GR[r2].  The raw value is always stored in the
 * vcr array; a few control registers additionally trigger side effects.
 */
unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long r2;

	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;

	switch (inst.M32.cr3) {
	case 0:		/* cr.dcr */
		vcpu_set_dcr(vcpu, r2);
		break;
	case 1:		/* cr.itm */
		vcpu_set_itm(vcpu, r2);
		break;
	case 66:	/* cr.tpr */
		vcpu_set_tpr(vcpu, r2);
		break;
	case 67:	/* cr.eoi */
		vcpu_set_eoi(vcpu, r2);
		break;
	default:
		break;
	}

	return 0;
}
/*
 * Emulate mov GR[r1] = cr[cr3].  cr.ivr (65) is computed on demand;
 * cr.eoi (67) always reads as zero; everything else comes from vcr[].
 */
unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long tgt = inst.M33.r1;
	unsigned long val;

	switch (inst.M33.cr3) {
	case 65:	/* cr.ivr: acknowledge highest pending interrupt */
		val = vcpu_get_ivr(vcpu);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;

	case 67:	/* cr.eoi is write-only; reads return 0 */
		vcpu_set_gr(vcpu, tgt, 0L, 0);
		break;
	default:
		val = VCPU(vcpu, vcr[inst.M33.cr3]);
		vcpu_set_gr(vcpu, tgt, val, 0);
		break;
	}

	return 0;
}
/*
 * Write the guest PSR: split the value between the virtual PSR (VPD)
 * and the machine cr.ipsr, check for a metaphysical/virtual mode
 * switch, and re-evaluate interrupts on a 0->1 transition of psr.i.
 */
void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
{

	unsigned long mask;
	struct kvm_pt_regs *regs;
	struct ia64_psr old_psr, new_psr;

	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	regs = vcpu_regs(vcpu);
	/* We only support guest as:
	 *	vpsr.pk = 0
	 *	vpsr.is = 0
	 * Otherwise panic
	 */
	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
		panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
				"& vpsr.is=0\n");

	/*
	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
	 * Since these bits will become 0, after success execution of each
	 * instruction, we will change set them to mIA64_PSR
	 */
	VCPU(vcpu, vpsr) = val
		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));

	if (!old_psr.i && (val & IA64_PSR_I)) {
		/* vpsr.i 0->1 */
		vcpu->arch.irq_check = 1;
	}
	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	/*
	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr)
	 * , except for the following bits:
	 * ic/i/dt/si/rt/mc/it/bn/vm
	 */
	mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
		IA64_PSR_VM;

	/* Keep VMM-owned bits in cr.ipsr; take the rest from the guest. */
	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));

	check_mm_mode_switch(vcpu, old_psr, new_psr);

	return ;
}
/*
 * Emulate the cover instruction: with interruption collection off,
 * save the current frame marker into cr.ifs, then start a new frame.
 */
unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
{
	struct ia64_psr vpsr;

	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);

	if (!vpsr.ic)
		VCPU(vcpu, ifs) = regs->cr_ifs;
	regs->cr_ifs = IA64_IFS_V;
	return (IA64_NO_FAULT);
}
/**************************************************************************
VCPU banked general register access routines
**************************************************************************/
#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
do { \
__asm__ __volatile__ ( \
";;extr.u %0 = %3,%6,16;;\n" \
"dep %1 = %0, %1, 0, 16;;\n" \
"st8 [%4] = %1\n" \
"extr.u %0 = %2, 16, 16;;\n" \
"dep %3 = %0, %3, %6, 16;;\n" \
"st8 [%5] = %3\n" \
::"r"(i), "r"(*b1unat), "r"(*b0unat), \
"r"(*runat), "r"(b1unat), "r"(runat), \
"i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
} while (0)
void vcpu_bsw0(struct kvm_vcpu *vcpu)
{
unsigned long i;
struct kvm_pt_regs *regs = vcpu_regs(vcpu);
unsigned long *r = ®s->r16;
unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
unsigned long *b1 = &VCPU(vcpu, vgr[0]);
unsigned long *runat = ®s->eml_unat;
unsigned long *b0unat = &VCPU(vcpu, vbnat);
unsigned long *b1unat = &VCPU(vcpu, vnat);
if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
for (i = 0; i < 16; i++) {
*b1++ = *r;
*r++ = *b0++;
}
vcpu_bsw0_unat(i, b0unat, b1unat, runat,
VMM_PT_REGS_R16_SLOT);
VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
}
}
#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
do { \
__asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n" \
"dep %1 = %0, %1, 16, 16;;\n" \
"st8 [%4] = %1\n" \
"extr.u %0 = %2, 0, 16;;\n" \
"dep %3 = %0, %3, %6, 16;;\n" \
"st8 [%5] = %3\n" \
::"r"(i), "r"(*b0unat), "r"(*b1unat), \
"r"(*runat), "r"(b0unat), "r"(runat), \
"i"(VMM_PT_REGS_R16_SLOT) : "memory"); \
} while (0)
void vcpu_bsw1(struct kvm_vcpu *vcpu)
{
unsigned long i;
struct kvm_pt_regs *regs = vcpu_regs(vcpu);
unsigned long *r = ®s->r16;
unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
unsigned long *b1 = &VCPU(vcpu, vgr[0]);
unsigned long *runat = ®s->eml_unat;
unsigned long *b0unat = &VCPU(vcpu, vbnat);
unsigned long *b1unat = &VCPU(vcpu, vnat);
if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
for (i = 0; i < 16; i++) {
*b0++ = *r;
*r++ = *b1++;
}
vcpu_bsw1_unat(i, b0unat, b1unat, runat,
VMM_PT_REGS_R16_SLOT);
VCPU(vcpu, vpsr) |= IA64_PSR_BN;
}
}
/*
 * Emulate rfi: restore the saved PSR (selecting the matching register
 * bank first), restore the frame marker if its valid bit is set, and
 * resume at the saved IIP.
 */
void vcpu_rfi(struct kvm_vcpu *vcpu)
{
	unsigned long ifs, psr;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	psr = VCPU(vcpu, ipsr);
	if (psr & IA64_PSR_BN)
		vcpu_bsw1(vcpu);
	else
		vcpu_bsw0(vcpu);
	vcpu_set_psr(vcpu, psr);
	ifs = VCPU(vcpu, ifs);
	if (ifs >> 63)		/* cr.ifs.v: frame marker is valid */
		regs->cr_ifs = ifs;
	regs->cr_iip = VCPU(vcpu, iip);
}
/*
   VPSR can't keep track of below bits of guest PSR
   This function gets guest PSR
 */
unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	unsigned long hw_bits = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC |
			IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_CPL |
			IA64_PSR_RI;

	/* Take hw_bits from the machine IPSR, everything else from VPSR. */
	return (VCPU(vcpu, vpsr) & ~hw_bits) | (regs->cr_ipsr & hw_bits);
}
/* Emulate rsm: clear the PSR bits named by the 24-bit immediate. */
void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
				| inst.M44.imm;

	vcpu_set_psr(vcpu, vcpu_get_psr(vcpu) & ~imm24);
}
/* Emulate ssm: set the PSR bits named by the 24-bit immediate. */
void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
				| inst.M44.imm;

	vcpu_set_psr(vcpu, vcpu_get_psr(vcpu) | imm24);
}
/* Generate Mask
 * Parameter:
 *   bit -- starting bit
 *   len -- how many bits
 *
 * Implemented with the 'dep' instruction (deposit -1 into a zero
 * base), which requires immediate operands — hence inline asm rather
 * than shift arithmetic.
 */
#define MASK(bit,len)					\
({							\
	__u64 ret;					\
							\
	__asm __volatile("dep %0=-1, r0, %1, %2"\
			: "=r" (ret):			\
	  "M" (bit),					\
	  "M" (len));					\
	ret;						\
})
/* Emulate mov psr.l = val: only the low 32 PSR bits are writable. */
void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long upper = vcpu_get_psr(vcpu) & MASK(32, 32);

	vcpu_set_psr(vcpu, (val & MASK(0, 32)) | upper);
}
/* Decode mov psr.l = GR[r2]. */
void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	vcpu_set_psr_l(vcpu, vcpu_get_gr(vcpu, inst.M35.r2));
}
/* Decode mov GR[r1] = psr: expose the low 32 bits plus bits 35-36. */
void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
{
	unsigned long val;

	val = vcpu_get_psr(vcpu);
	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
}
/*
 * Advance the guest instruction pointer by one slot: bump the
 * restart-instruction field (ri) and, after the last slot of a
 * bundle (ri == 2), step IIP to the next 16-byte bundle.
 */
void vcpu_increment_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *slot = (struct ia64_psr *)&regs->cr_ipsr;

	if (slot->ri != 2) {
		slot->ri++;
	} else {
		regs->cr_iip += 16;
		slot->ri = 0;
	}
}
/*
 * Step the guest instruction pointer back by one slot: decrement
 * the restart-instruction field (ri) and, when already at slot 0,
 * wrap to slot 2 of the previous 16-byte bundle.
 */
void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
{
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
	struct ia64_psr *slot = (struct ia64_psr *)&regs->cr_ipsr;

	if (slot->ri != 0) {
		slot->ri--;
	} else {
		regs->cr_iip -= 16;
		slot->ri = 2;
	}
}
/** Emulate a privileged operation.
 *
 * Dispatches on the virtualization-fault cause recorded in
 * VMX(vcpu, cause) and emulates the faulting instruction whose
 * encoding was saved in VMX(vcpu, opcode).  On anything other than
 * a fault (and except for rfi, which sets IIP itself) the guest
 * instruction pointer is advanced past the emulated instruction.
 *
 * @param vcpu  virtual cpu
 * @param regs  guest register state at the fault (currently unused
 *              here directly; the handlers fetch it via vcpu)
 */
void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
	unsigned long status, cause, opcode ;
	INST64 inst;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);
	inst.inst = opcode;
	/*
	 * Switch to actual virtual rid in rr0 and rr4,
	 * which is required by some tlb related instructions.
	 */
	prepare_if_physical_mode(vcpu);

	switch (cause) {
	case EVENT_RSM:
		kvm_rsm(vcpu, inst);
		break;
	case EVENT_SSM:
		kvm_ssm(vcpu, inst);
		break;
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
		break;
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		break;
	case EVENT_BSW_0:
		vcpu_bsw0(vcpu);
		break;
	case EVENT_BSW_1:
		vcpu_bsw1(vcpu);
		break;
	case EVENT_COVER:
		vcpu_cover(vcpu);
		break;
	case EVENT_RFI:
		vcpu_rfi(vcpu);
		break;
	case EVENT_ITR_D:
		kvm_itr_d(vcpu, inst);
		break;
	case EVENT_ITR_I:
		kvm_itr_i(vcpu, inst);
		break;
	case EVENT_PTR_D:
		kvm_ptr_d(vcpu, inst);
		break;
	case EVENT_PTR_I:
		kvm_ptr_i(vcpu, inst);
		break;
	case EVENT_ITC_D:
		kvm_itc_d(vcpu, inst);
		break;
	case EVENT_ITC_I:
		kvm_itc_i(vcpu, inst);
		break;
	case EVENT_PTC_L:
		kvm_ptc_l(vcpu, inst);
		break;
	case EVENT_PTC_G:
		kvm_ptc_g(vcpu, inst);
		break;
	case EVENT_PTC_GA:
		kvm_ptc_ga(vcpu, inst);
		break;
	case EVENT_PTC_E:
		kvm_ptc_e(vcpu, inst);
		break;
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		break;
	case EVENT_THASH:
		kvm_thash(vcpu, inst);
		break;
	case EVENT_TTAG:
		kvm_ttag(vcpu, inst);
		break;
	case EVENT_TPA:
		status = kvm_tpa(vcpu, inst);
		break;
	case EVENT_TAK:
		kvm_tak(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
		break;
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
		break;
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		break;
	case EVENT_VMSW:
		/* vmsw is not supported; signal a fault to the caller. */
		status = IA64_FAULT;
		break;
	default:
		break;
	};
	/*Assume all status is NO_FAULT ?*/
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	/* Undo the rr0/rr4 switch done by prepare_if_physical_mode(). */
	recover_if_physical_mode(vcpu);
}
/*
 * Reset a vcpu's virtualized architectural state to power-on
 * defaults: region registers, PSR, PTA, the local SAPIC (lid/tpr/
 * irr/xtp) and the per-vcpu timer state.  The 0x10000 values set
 * the "masked" bit (bit 16) in the interrupt vector registers.
 */
void init_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;

	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */

	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
}
/*
 * Program the machine region registers for this vcpu: rr0/rr4 get
 * either the metaphysical (physical-mode emulation) or the
 * translated virtual value depending on the guest's current mode;
 * rr1/rr2/rr3/rr5/rr7 always get the translated virtual values.
 * Runs with interrupts disabled; each write is followed by a data
 * serialize as required by the architecture.
 */
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	local_irq_save(psr);

	/* WARNING: not allow co-exist of both virtual mode and physical
	 * mode in same region
	 */

	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
			panic_vm(vcpu, "Machine Status conflicts!\n");

		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
	} else {
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	}
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();
	ia64_srlz_d();
	ia64_set_psr(psr);
}
/*
 * Entry point for the VMM: restore the per-vcpu VPD via PAL,
 * initialize the virtual TLB/VHPT and architectural state, program
 * the region registers, then jump to the reset vector.  Does not
 * return through the normal path (vmm_reset_entry transfers
 * control away); the return 0 only satisfies the prototype.
 */
int vmm_entry(void)
{
	struct kvm_vcpu *v;
	v = current_vcpu;

	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
						0, 0, 0, 0, 0, 0);
	kvm_init_vtlb(v);
	kvm_init_vhpt(v);
	init_vcpu(v);
	kvm_init_all_rr(v);
	vmm_reset_entry();

	return 0;
}
/*
 * Dump the guest register state (control, application, branch,
 * floating point and static general registers) to the kernel log.
 * Used by panic_vm() just before handing control back to the host.
 */
static void kvm_show_registers(struct kvm_pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	struct kvm_vcpu *vcpu = current_vcpu;
	if (vcpu != NULL)
		printk("vcpu 0x%p vcpu %d\n",
		       vcpu, vcpu->vcpu_id);

	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
	       regs->cr_ipsr, regs->cr_ifs, ip);

	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
	       regs->b6, regs->b7);
	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
	       regs->f6.u.bits[1], regs->f6.u.bits[0],
	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
	       regs->f8.u.bits[1], regs->f8.u.bits[0],
	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
	       regs->f10.u.bits[1], regs->f10.u.bits[0],
	       regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
	       regs->r2, regs->r3);
	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
	       regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
	       regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
	       regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
	       regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
	       regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
	       regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
	       regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
	       regs->r30, regs->r31);
}
/*
 * Fatal-error path for guest emulation: format and log the panic
 * message, dump the guest registers and transfer control back to
 * the host side with EXIT_REASON_VM_PANIC.  Never returns.
 */
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
	va_list args;
	char buf[256];

	struct kvm_pt_regs *regs = vcpu_regs(v);
	struct exit_ctl_data *p = &v->arch.exit_data;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	/* Print via "%s": passing the already-formatted buffer as the
	 * format string would re-interpret any '%' it contains as
	 * printk conversion specifiers (format-string bug). */
	printk("%s", buf);
	kvm_show_registers(regs);
	p->exit_reason = EXIT_REASON_VM_PANIC;
	vmm_transition(v);
	/* Never to return */
	while (1);
}
| gpl-2.0 |
Crossbones/crossbones_kernel_tuna | drivers/gpu/drm/nouveau/nouveau_connector.c | 1945 | 26764 | /*
* Copyright (C) 2008 Maarten Maathuis.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <acpi/button.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc_helper.h"
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_connector.h"
#include "nouveau_hw.h"
static void nouveau_connector_hotplug(void *, int);
/*
 * Return the first encoder attached to 'connector' whose DCB type
 * matches 'type' (OUTPUT_ANY matches anything), or NULL when no
 * attached encoder qualifies.
 */
static struct nouveau_encoder *
find_encoder_by_type(struct drm_connector *connector, int type)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_object *obj;
	struct nouveau_encoder *nv_encoder;
	int i;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		int id = connector->encoder_ids[i];

		/* The id array is packed; the first zero ends it. */
		if (!id)
			break;

		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
		if (!obj)
			continue;

		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
		if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
			return nv_encoder;
	}

	return NULL;
}
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
{
struct drm_device *dev = to_drm_encoder(encoder)->dev;
struct drm_connector *drm_connector;
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
if (drm_connector->encoder == to_drm_encoder(encoder))
return nouveau_connector(drm_connector);
}
return NULL;
}
/*TODO: This could use improvement, and learn to handle the fixed
* BIOS tables etc. It's fine currently, for its only user.
*/
int
nouveau_connector_bpp(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
if (nv_connector->edid && nv_connector->edid->revision >= 4) {
u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
if (bpc > 4)
return bpc;
}
return 18;
}
/*
 * Final teardown of a connector: unhook the hotplug GPIO handler,
 * tear down backlight control for panel outputs (LVDS/eDP), free
 * the cached EDID and release the DRM core object and sysfs node.
 */
static void
nouveau_connector_destroy(struct drm_connector *connector)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpio_engine *pgpio;
	struct drm_device *dev;

	if (!nv_connector)
		return;

	dev = nv_connector->base.dev;
	dev_priv = dev->dev_private;
	NV_DEBUG_KMS(dev, "\n");

	pgpio = &dev_priv->engine.gpio;
	/* Only chips with a gpio irq facility registered a handler. */
	if (pgpio->irq_unregister) {
		pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
				      nouveau_connector_hotplug, connector);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		nouveau_backlight_exit(connector);

	kfree(nv_connector->edid);
	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
/*
 * Probe each encoder wired to this connector for an EDID EEPROM at
 * i2c address 0x50.  On success, return the responding i2c channel
 * and store the matching encoder in *pnv_encoder; NULL otherwise.
 */
static struct nouveau_i2c_chan *
nouveau_connector_ddc_detect(struct drm_connector *connector,
			     struct nouveau_encoder **pnv_encoder)
{
	struct drm_device *dev = connector->dev;
	int i;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		struct nouveau_encoder *nv_encoder;
		struct nouveau_i2c_chan *i2c = NULL;
		struct drm_mode_object *obj;
		int id = connector->encoder_ids[i];

		if (!id)
			break;

		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
		if (!obj)
			continue;
		nv_encoder = nouveau_encoder(obj_to_encoder(obj));

		/* 0xf marks "no i2c bus" in the DCB entry. */
		if (nv_encoder->dcb->i2c_index < 0xf)
			i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);

		if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
			*pnv_encoder = nv_encoder;
			return i2c;
		}
	}

	return NULL;
}
/*
 * On PowerPC Open Firmware machines the panel EDID may live as an
 * "EDID" property on a child node of the PCI device.  Match the
 * child node's trailing letter ('A', 'B', ...) against the
 * encoder's i2c index; on a hit, cache a copy of the EDID and
 * return the (TMDS preferred, else analog) encoder.  Always NULL
 * on non-powerpc builds.
 */
static struct nouveau_encoder *
nouveau_connector_of_detect(struct drm_connector *connector)
{
#ifdef __powerpc__
	struct drm_device *dev = connector->dev;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder;
	struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);

	if (!dn ||
	    !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
	      (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
		return NULL;

	for_each_child_of_node(dn, cn) {
		const char *name = of_get_property(cn, "name", NULL);
		const void *edid = of_get_property(cn, "EDID", NULL);
		int idx = name ? name[strlen(name) - 1] - 'A' : 0;

		if (nv_encoder->dcb->i2c_index == idx && edid) {
			/* NOTE(review): kmemdup result is not checked;
			 * callers appear to treat a NULL ->edid as
			 * "no EDID" — confirm before relying on it. */
			nv_connector->edid =
				kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
			of_node_put(cn);
			return nv_encoder;
		}
	}
#endif
	return NULL;
}
/*
 * Record which encoder detect() settled on and derive connector
 * capabilities from the encoder type: digital links (LVDS/TMDS)
 * never allow interlace or doublescan; for analog paths, interlace
 * is additionally disabled on chipsets where it is known broken.
 * Also updates the DVI-I subconnector property.
 */
static void
nouveau_connector_set_encoder(struct drm_connector *connector,
			      struct nouveau_encoder *nv_encoder)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
	struct drm_device *dev = connector->dev;

	if (nv_connector->detected_encoder == nv_encoder)
		return;
	nv_connector->detected_encoder = nv_encoder;

	if (nv_encoder->dcb->type == OUTPUT_LVDS ||
	    nv_encoder->dcb->type == OUTPUT_TMDS) {
		connector->doublescan_allowed = false;
		connector->interlace_allowed = false;
	} else {
		connector->doublescan_allowed = true;
		if (dev_priv->card_type == NV_20 ||
		   (dev_priv->card_type == NV_10 &&
		    (dev->pci_device & 0x0ff0) != 0x0100 &&
		    (dev->pci_device & 0x0ff0) != 0x0150))
			/* HW is broken */
			connector->interlace_allowed = false;
		else
			connector->interlace_allowed = true;
	}

	if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
		drm_connector_property_set_value(connector,
			dev->mode_config.dvi_i_subconnector_property,
			nv_encoder->dcb->type == OUTPUT_TMDS ?
			DRM_MODE_SUBCONNECTOR_DVID :
			DRM_MODE_SUBCONNECTOR_DVIA);
	}
}
/*
 * Generic detect(): try DDC/EDID first, then Open Firmware
 * properties (powerpc), and finally — only when 'force' is set —
 * a load-detect on an analog (or TV, unless disabled) encoder.
 * Any previously cached EDID is dropped before probing.
 */
static enum drm_connector_status
nouveau_connector_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = NULL;
	struct nouveau_i2c_chan *i2c;
	int type;

	/* Cleanup the previous EDID block. */
	if (nv_connector->edid) {
		drm_mode_connector_update_edid_property(connector, NULL);
		kfree(nv_connector->edid);
		nv_connector->edid = NULL;
	}

	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
	if (i2c) {
		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
		drm_mode_connector_update_edid_property(connector,
							nv_connector->edid);
		if (!nv_connector->edid) {
			NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
				 drm_get_connector_name(connector));
			goto detect_analog;
		}

		/* DisplayPort needs link training before it is usable. */
		if (nv_encoder->dcb->type == OUTPUT_DP &&
		    !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
			NV_ERROR(dev, "Detected %s, but failed init\n",
				 drm_get_connector_name(connector));
			return connector_status_disconnected;
		}

		/* Override encoder type for DVI-I based on whether EDID
		 * says the display is digital or analog, both use the
		 * same i2c channel so the value returned from ddc_detect
		 * isn't necessarily correct.
		 */
		if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
			if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
				type = OUTPUT_TMDS;
			else
				type = OUTPUT_ANALOG;

			nv_encoder = find_encoder_by_type(connector, type);
			if (!nv_encoder) {
				NV_ERROR(dev, "Detected %d encoder on %s, "
					      "but no object!\n", type,
					 drm_get_connector_name(connector));
				return connector_status_disconnected;
			}
		}

		nouveau_connector_set_encoder(connector, nv_encoder);
		return connector_status_connected;
	}

	nv_encoder = nouveau_connector_of_detect(connector);
	if (nv_encoder) {
		nouveau_connector_set_encoder(connector, nv_encoder);
		return connector_status_connected;
	}

detect_analog:
	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
	if (!nv_encoder && !nouveau_tv_disable)
		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
	/* Load detection is intrusive; only done when forced. */
	if (nv_encoder && force) {
		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
		struct drm_encoder_helper_funcs *helper =
						encoder->helper_private;

		if (helper->detect(encoder, connector) ==
						connector_status_connected) {
			nouveau_connector_set_encoder(connector, nv_encoder);
			return connector_status_connected;
		}

	}

	return connector_status_disconnected;
}
/*
 * LVDS-specific detect(): try DDC, then ACPI, then VBIOS straps,
 * then a VBIOS-embedded EDID block, in that order.  A closed lid
 * (when ACPI button support is built in) downgrades "connected"
 * to "unknown".
 */
static enum drm_connector_status
nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = NULL;
	enum drm_connector_status status = connector_status_disconnected;

	/* Cleanup the previous EDID block. */
	if (nv_connector->edid) {
		drm_mode_connector_update_edid_property(connector, NULL);
		kfree(nv_connector->edid);
		nv_connector->edid = NULL;
	}

	nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
	if (!nv_encoder)
		return connector_status_disconnected;

	/* Try retrieving EDID via DDC */
	if (!dev_priv->vbios.fp_no_ddc) {
		status = nouveau_connector_detect(connector, force);
		if (status == connector_status_connected)
			goto out;
	}

	/* On some laptops (Sony, i'm looking at you) there appears to
	 * be no direct way of accessing the panel's EDID.  The only
	 * option available to us appears to be to ask ACPI for help..
	 *
	 * It's important this check's before trying straps, one of the
	 * said manufacturer's laptops are configured in such a way
	 * the nouveau decides an entry in the VBIOS FP mode table is
	 * valid - it's not (rh#613284)
	 */
	if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
		if (!nouveau_acpi_edid(dev, connector)) {
			status = connector_status_connected;
			goto out;
		}
	}

	/* If no EDID found above, and the VBIOS indicates a hardcoded
	 * modeline is avalilable for the panel, set it as the panel's
	 * native mode and exit.
	 */
	if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc ||
	    nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
		status = connector_status_connected;
		goto out;
	}

	/* Still nothing, some VBIOS images have a hardcoded EDID block
	 * stored for the panel stored in them.
	 */
	if (!dev_priv->vbios.fp_no_ddc) {
		struct edid *edid =
			(struct edid *)nouveau_bios_embedded_edid(dev);
		if (edid) {
			/* Copy with kmemdup and check the result: the
			 * previous unchecked kmalloc() + struct copy
			 * dereferenced NULL on allocation failure. */
			nv_connector->edid =
				kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
			if (nv_connector->edid)
				status = connector_status_connected;
		}
	}

out:
#if defined(CONFIG_ACPI_BUTTON) || \
	(defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
	if (status == connector_status_connected &&
	    !nouveau_ignorelid && !acpi_lid_open())
		status = connector_status_unknown;
#endif

	drm_mode_connector_update_edid_property(connector, nv_connector->edid);
	nouveau_connector_set_encoder(connector, nv_encoder);
	return status;
}
/*
 * Forced-connection hook: bind an encoder without probing.  DVI-I
 * honours the user's digital/analog choice; everything else takes
 * the first encoder of any type.  On failure the connector is
 * marked disconnected.
 */
static void
nouveau_connector_force(struct drm_connector *connector)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder;
	int type = OUTPUT_ANY;

	if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I)
		type = (connector->force == DRM_FORCE_ON_DIGITAL) ?
			OUTPUT_TMDS : OUTPUT_ANALOG;

	nv_encoder = find_encoder_by_type(connector, type);
	if (!nv_encoder) {
		NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
			 drm_get_connector_name(connector));
		connector->status = connector_status_disconnected;
		return;
	}

	nouveau_connector_set_encoder(connector, nv_encoder);
}
/*
 * Apply a user-set connector property: scaling mode (may force a
 * full modeset when switching GPU vs panel scaling), dithering,
 * or — for TV encoders — delegate to the slave encoder's
 * set_property.  Returns 0 on success or a negative errno.
 */
static int
nouveau_connector_set_property(struct drm_connector *connector,
			       struct drm_property *property, uint64_t value)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
	struct drm_device *dev = connector->dev;
	int ret;

	/* Scaling mode */
	if (property == dev->mode_config.scaling_mode_property) {
		struct nouveau_crtc *nv_crtc = NULL;
		bool modeset = false;

		switch (value) {
		case DRM_MODE_SCALE_NONE:
		case DRM_MODE_SCALE_FULLSCREEN:
		case DRM_MODE_SCALE_CENTER:
		case DRM_MODE_SCALE_ASPECT:
			break;
		default:
			return -EINVAL;
		}

		/* LVDS always needs gpu scaling */
		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
		    value == DRM_MODE_SCALE_NONE)
			return -EINVAL;

		/* Changing between GPU and panel scaling requires a full
		 * modeset
		 */
		if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
		    (value == DRM_MODE_SCALE_NONE))
			modeset = true;
		nv_connector->scaling_mode = value;

		if (connector->encoder && connector->encoder->crtc)
			nv_crtc = nouveau_crtc(connector->encoder->crtc);
		if (!nv_crtc)
			return 0;

		if (modeset || !nv_crtc->set_scale) {
			/* drm_crtc_helper_set_mode() returns a bool:
			 * true on success, hence -EINVAL when !ret. */
			ret = drm_crtc_helper_set_mode(&nv_crtc->base,
						       &nv_crtc->base.mode,
						       nv_crtc->base.x,
						       nv_crtc->base.y, NULL);
			if (!ret)
				return -EINVAL;
		} else {
			ret = nv_crtc->set_scale(nv_crtc, value, true);
			if (ret)
				return ret;
		}

		return 0;
	}

	/* Dithering */
	if (property == dev->mode_config.dithering_mode_property) {
		struct nouveau_crtc *nv_crtc = NULL;

		if (value == DRM_MODE_DITHERING_ON)
			nv_connector->use_dithering = true;
		else
			nv_connector->use_dithering = false;

		if (connector->encoder && connector->encoder->crtc)
			nv_crtc = nouveau_crtc(connector->encoder->crtc);
		if (!nv_crtc || !nv_crtc->set_dither)
			return 0;

		return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
					   true);
	}

	if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
		return get_slave_funcs(encoder)->set_property(
			encoder, connector, property, value);

	return -EINVAL;
}
/*
 * Pick the panel's native mode from the probed-mode list: the
 * preferred mode when one exists, otherwise the largest valid
 * non-interlaced mode (by width, then height, then refresh).
 * Returns a duplicated mode the caller owns, or NULL.
 */
static struct drm_display_mode *
nouveau_connector_native_mode(struct drm_connector *connector)
{
	struct drm_connector_helper_funcs *helper = connector->helper_private;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode, *largest = NULL;
	int high_w = 0, high_h = 0, high_v = 0;

	list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
		mode->vrefresh = drm_mode_vrefresh(mode);
		if (helper->mode_valid(connector, mode) != MODE_OK ||
		    (mode->flags & DRM_MODE_FLAG_INTERLACE))
			continue;

		/* Use preferred mode if there is one.. */
		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
			NV_DEBUG_KMS(dev, "native mode from preferred\n");
			return drm_mode_duplicate(dev, mode);
		}

		/* Otherwise, take the resolution with the largest width, then
		 * height, then vertical refresh
		 */
		if (mode->hdisplay < high_w)
			continue;

		if (mode->hdisplay == high_w && mode->vdisplay < high_h)
			continue;

		if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
		    mode->vrefresh < high_v)
			continue;

		high_w = mode->hdisplay;
		high_h = mode->vdisplay;
		high_v = mode->vrefresh;
		largest = mode;
	}

	NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
		      high_w, high_h, high_v);
	return largest ? drm_mode_duplicate(dev, largest) : NULL;
}
/* A candidate resolution offered on scaled (panel) outputs. */
struct moderec {
	int hdisplay;
	int vdisplay;
};

/* Common resolutions to expose on scaler-capable outputs, largest
 * first; the list is terminated by a zeroed entry. */
static struct moderec scaler_modes[] = {
	{ 1920, 1200 },
	{ 1920, 1080 },
	{ 1680, 1050 },
	{ 1600, 1200 },
	{ 1400, 1050 },
	{ 1280, 1024 },
	{ 1280, 960 },
	{ 1152, 864 },
	{ 1024, 768 },
	{ 800, 600 },
	{ 720, 400 },
	{ 640, 480 },
	{ 640, 400 },
	{ 640, 350 },
	{}
};
/*
 * Add every scaler_modes[] entry that fits inside the panel's
 * native resolution as a CVT mode at the native refresh rate.
 * Returns the number of modes added (0 when no native mode is
 * known).
 */
static int
nouveau_connector_scaler_modes_add(struct drm_connector *connector)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct drm_display_mode *native = nv_connector->native_mode;
	struct drm_device *dev = connector->dev;
	struct moderec *entry;
	int modes = 0;

	if (!native)
		return 0;

	/* Iterate with the increment in the for-header: the previous
	 * while-loop did "if (!m) continue;" before its mode++, which
	 * spun forever if drm_cvt_mode() ever returned NULL. */
	for (entry = scaler_modes; entry->hdisplay; entry++) {
		struct drm_display_mode *m;

		if (entry->hdisplay > native->hdisplay ||
		    entry->vdisplay > native->vdisplay)
			continue;

		m = drm_cvt_mode(dev, entry->hdisplay, entry->vdisplay,
				 drm_mode_vrefresh(native), false,
				 false, false);
		if (!m)
			continue;

		m->type |= DRM_MODE_TYPE_DRIVER;

		drm_mode_probed_add(connector, m);
		modes++;
	}

	return modes;
}
/*
 * Build the connector's mode list: EDID modes when available,
 * otherwise a VBIOS-strap modeline for LVDS; derive/refresh the
 * native mode; add slave-encoder modes for TV; and append scaled
 * modes on panel outputs.  Returns the number of modes added.
 */
static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
	int ret = 0;

	/* destroy the native mode, the attached monitor could have changed.
	 */
	if (nv_connector->native_mode) {
		drm_mode_destroy(dev, nv_connector->native_mode);
		nv_connector->native_mode = NULL;
	}

	if (nv_connector->edid)
		ret = drm_add_edid_modes(connector, nv_connector->edid);
	else
	if (nv_encoder->dcb->type == OUTPUT_LVDS &&
	    (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
	     dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
		struct drm_display_mode mode;

		nouveau_bios_fp_mode(dev, &mode);
		nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
	}

	/* Find the native mode if this is a digital panel, if we didn't
	 * find any modes through DDC previously add the native mode to
	 * the list of modes.
	 */
	if (!nv_connector->native_mode)
		nv_connector->native_mode =
			nouveau_connector_native_mode(connector);
	if (ret == 0 && nv_connector->native_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
		drm_mode_probed_add(connector, mode);
		ret = 1;
	}

	if (nv_encoder->dcb->type == OUTPUT_TV)
		ret = get_slave_funcs(encoder)->get_modes(encoder, connector);

	if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
	    nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
	    nv_connector->dcb->type == DCB_CONNECTOR_eDP)
		ret += nouveau_connector_scaler_modes_add(connector);

	return ret;
}
/*
 * Maximum single-link TMDS pixel clock (kHz) for the detected
 * encoder: off-chip transmitters and NV46+ on-chip ones manage the
 * full 165MHz; older on-chip transmitters are progressively more
 * limited.
 */
static unsigned
get_tmds_link_bandwidth(struct drm_connector *connector)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
	struct dcb_entry *dcb = nv_connector->detected_encoder->dcb;

	if (dcb->location != DCB_LOC_ON_CHIP || dev_priv->chipset >= 0x46)
		return 165000;
	if (dev_priv->chipset >= 0x40)
		return 155000;
	if (dev_priv->chipset >= 0x18)
		return 135000;

	return 112000;
}
/*
 * Validate a user/EDID mode against the detected encoder's clock
 * limits (and, for LVDS, the panel's native resolution).  DP
 * scales the requested clock by bits-per-pixel before comparing
 * against the link's aggregate bandwidth.
 */
static int
nouveau_connector_mode_valid(struct drm_connector *connector,
			     struct drm_display_mode *mode)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
	unsigned min_clock = 25000, max_clock = min_clock;
	unsigned clock = mode->clock;

	switch (nv_encoder->dcb->type) {
	case OUTPUT_LVDS:
		if (nv_connector->native_mode &&
		    (mode->hdisplay > nv_connector->native_mode->hdisplay ||
		     mode->vdisplay > nv_connector->native_mode->vdisplay))
			return MODE_PANEL;

		min_clock = 0;
		max_clock = 400000;
		break;
	case OUTPUT_TMDS:
		max_clock = get_tmds_link_bandwidth(connector);
		/* Dual link doubles the usable bandwidth. */
		if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
			max_clock *= 2;
		break;
	case OUTPUT_ANALOG:
		max_clock = nv_encoder->dcb->crtconf.maxfreq;
		if (!max_clock)
			max_clock = 350000;
		break;
	case OUTPUT_TV:
		return get_slave_funcs(encoder)->mode_valid(encoder, mode);
	case OUTPUT_DP:
		if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
			max_clock = nv_encoder->dp.link_nr * 270000;
		else
			max_clock = nv_encoder->dp.link_nr * 162000;

		clock = clock * nouveau_connector_bpp(connector) / 8;
		break;
	default:
		BUG_ON(1);
		return MODE_BAD;
	}

	if (clock < min_clock)
		return MODE_CLOCK_LOW;

	if (clock > max_clock)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}
static struct drm_encoder *
nouveau_connector_best_encoder(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
if (nv_connector->detected_encoder)
return to_drm_encoder(nv_connector->detected_encoder);
return NULL;
}
/* Probe helpers shared by every connector type. */
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
	.get_modes = nouveau_connector_get_modes,
	.mode_valid = nouveau_connector_mode_valid,
	.best_encoder = nouveau_connector_best_encoder,
};

/* Core connector ops for non-panel outputs. */
static const struct drm_connector_funcs
nouveau_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.save = NULL,
	.restore = NULL,
	.detect = nouveau_connector_detect,
	.destroy = nouveau_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = nouveau_connector_set_property,
	.force = nouveau_connector_force
};

/* Core connector ops for LVDS panels: identical except for the
 * panel-aware detect() (ACPI/straps/embedded-EDID fallbacks). */
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
	.dpms = drm_helper_connector_dpms,
	.save = NULL,
	.restore = NULL,
	.detect = nouveau_connector_detect_lvds,
	.destroy = nouveau_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = nouveau_connector_set_property,
	.force = nouveau_connector_force
};
/*
 * Create (or return the cached) DRM connector for DCB connector
 * table entry 'index': map the DCB type to a DRM connector type,
 * initialize scaling/dithering properties and polling policy per
 * type, hook up the hotplug GPIO handler when the chip has one,
 * and set up backlight control for panel outputs.  Returns the
 * connector or an ERR_PTR.
 */
struct drm_connector *
nouveau_connector_create(struct drm_device *dev, int index)
{
	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	struct nouveau_connector *nv_connector = NULL;
	struct dcb_connector_table_entry *dcb = NULL;
	struct drm_connector *connector;
	int type, ret = 0;

	NV_DEBUG_KMS(dev, "\n");

	if (index >= dev_priv->vbios.dcb.connector.entries)
		return ERR_PTR(-EINVAL);

	dcb = &dev_priv->vbios.dcb.connector.entry[index];
	/* Already created for this entry; hand back the cached one. */
	if (dcb->drm)
		return dcb->drm;

	switch (dcb->type) {
	case DCB_CONNECTOR_VGA:
		type = DRM_MODE_CONNECTOR_VGA;
		break;
	case DCB_CONNECTOR_TV_0:
	case DCB_CONNECTOR_TV_1:
	case DCB_CONNECTOR_TV_3:
		type = DRM_MODE_CONNECTOR_TV;
		break;
	case DCB_CONNECTOR_DVI_I:
		type = DRM_MODE_CONNECTOR_DVII;
		break;
	case DCB_CONNECTOR_DVI_D:
		type = DRM_MODE_CONNECTOR_DVID;
		break;
	case DCB_CONNECTOR_HDMI_0:
	case DCB_CONNECTOR_HDMI_1:
		type = DRM_MODE_CONNECTOR_HDMIA;
		break;
	case DCB_CONNECTOR_LVDS:
	case DCB_CONNECTOR_LVDS_SPWG:
		type = DRM_MODE_CONNECTOR_LVDS;
		funcs = &nouveau_connector_funcs_lvds;
		break;
	case DCB_CONNECTOR_DP:
		type = DRM_MODE_CONNECTOR_DisplayPort;
		break;
	case DCB_CONNECTOR_eDP:
		type = DRM_MODE_CONNECTOR_eDP;
		break;
	default:
		NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
		return ERR_PTR(-EINVAL);
	}

	nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
	if (!nv_connector)
		return ERR_PTR(-ENOMEM);
	nv_connector->dcb = dcb;
	connector = &nv_connector->base;

	/* defaults, will get overridden in detect() */
	connector->interlace_allowed = false;
	connector->doublescan_allowed = false;

	drm_connector_init(dev, connector, funcs, type);
	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);

	/* Check if we need dithering enabled */
	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
		bool dummy, is_24bit = false;

		ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
		if (ret) {
			NV_ERROR(dev, "Error parsing LVDS table, disabling "
				 "LVDS\n");
			goto fail;
		}

		nv_connector->use_dithering = !is_24bit;
	}

	/* Init DVI-I specific properties */
	if (dcb->type == DCB_CONNECTOR_DVI_I) {
		drm_mode_create_dvi_i_properties(dev);
		drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
		drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
	}

	switch (dcb->type) {
	case DCB_CONNECTOR_VGA:
		if (dev_priv->card_type >= NV_50) {
			drm_connector_attach_property(connector,
					dev->mode_config.scaling_mode_property,
					nv_connector->scaling_mode);
		}
		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
		/* fall-through */
	case DCB_CONNECTOR_TV_0:
	case DCB_CONNECTOR_TV_1:
	case DCB_CONNECTOR_TV_3:
		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
		break;
	default:
		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;

		drm_connector_attach_property(connector,
				dev->mode_config.scaling_mode_property,
				nv_connector->scaling_mode);
		drm_connector_attach_property(connector,
				dev->mode_config.dithering_mode_property,
				nv_connector->use_dithering ?
				DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);

		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) {
			/* NV50+ has hotplug irqs; older chips must poll. */
			if (dev_priv->card_type >= NV_50)
				connector->polled = DRM_CONNECTOR_POLL_HPD;
			else
				connector->polled = DRM_CONNECTOR_POLL_CONNECT;
		}
		break;
	}

	if (pgpio->irq_register) {
		pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
				    nouveau_connector_hotplug, connector);
	}

	drm_sysfs_connector_add(connector);

	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		nouveau_backlight_init(connector);

	dcb->drm = connector;
	return dcb->drm;

fail:
	drm_connector_cleanup(connector);
	kfree(connector);
	return ERR_PTR(ret);

}
static void
nouveau_connector_hotplug(void *data, int plugged)
{
struct drm_connector *connector = data;
struct drm_device *dev = connector->dev;
NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
drm_get_connector_name(connector));
if (connector->encoder && connector->encoder->crtc &&
connector->encoder->crtc->enabled) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
struct drm_encoder_helper_funcs *helper =
connector->encoder->helper_private;
if (nv_encoder->dcb->type == OUTPUT_DP) {
if (plugged)
helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
else
helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
}
}
drm_helper_hpd_irq_event(dev);
}
| gpl-2.0 |
arkusuma/mediapad-kernel-ics | drivers/gpu/drm/nouveau/nv40_graph.c | 1945 | 15354 | /*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
#include "nouveau_ramht.h"
/* Per-device PGRAPH engine state for NV40-family chips. */
struct nv40_graph_engine {
	struct nouveau_exec_engine base;
	u32 grctx_size;	/* per-channel context size in bytes, computed at init */
};
static struct nouveau_channel *
nv40_graph_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *grctx;
uint32_t inst;
int i;
inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
return NULL;
inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
if (!dev_priv->channels.ptr[i])
continue;
grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
if (grctx && grctx->pinst == inst)
return dev_priv->channels.ptr[i];
}
return NULL;
}
/*
 * Ask the PGRAPH context-switch microcode to transfer the context at
 * instance address @inst: save it to memory (@save != 0) or load it
 * into the engine.  Returns 0 on success, -EBUSY if the microcode does
 * not signal completion within the poll budget.
 */
static int
nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
{
	uint32_t old_cp, tv = 1000, tmp;
	int i;

	/* Point the ctxprog at the target context, saving the old pointer. */
	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);

	/* Select transfer direction. */
	tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);

	/* Kick the transfer. */
	tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);

	nouveau_wait_for_idle(dev);

	/* Poll until the microcode reports completion (0x40030C reads 0). */
	for (i = 0; i < tv; i++) {
		if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
			break;
	}

	/* Restore the previous context pointer regardless of outcome. */
	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);

	if (i == tv) {
		/* Timed out: dump the microcode state for diagnosis. */
		uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
		NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
		NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
			 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
			 ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
		NV_ERROR(dev, "0x40030C = 0x%08x\n",
			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
		return -EBUSY;
	}

	return 0;
}
/*
 * Save the currently-loaded PGRAPH context (if any) back to instance
 * memory and clear the engine's "context loaded" state.
 */
static int
nv40_graph_unload_context(struct drm_device *dev)
{
	uint32_t inst;
	int ret;

	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
		return 0;	/* nothing resident, nothing to save */
	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;

	ret = nv40_graph_transfer_context(dev, inst, 1);

	/* Write back without the LOADED bit to mark no context present. */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
	return ret;
}
/*
 * Allocate and initialise a PGRAPH context for @chan, then hook its
 * address into the channel's RAMFC (and into live PFIFO state if the
 * channel is currently active there).  Returns 0 or a negative errno.
 */
static int
nv40_graph_context_new(struct nouveau_channel *chan, int engine)
{
	struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *grctx = NULL;
	struct nouveau_grctx ctx = {};
	unsigned long flags;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
	if (ret)
		return ret;

	/* Initialise default context values */
	ctx.dev = chan->dev;
	ctx.mode = NOUVEAU_GRCTX_VALS;
	ctx.data = grctx;
	nv40_grctx_init(&ctx);

	/* First word of the context points back at its own vram address. */
	nv_wo32(grctx, 0, grctx->vinst);

	/* init grctx pointer in ramfc, and on PFIFO if channel is
	 * already active there
	 */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
	/* NOTE(review): 0x002500 bit0 toggle presumably pauses/resumes
	 * PFIFO around the update -- confirm against hw docs. */
	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
		nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	chan->engctx[engine] = grctx;
	return 0;
}
/*
 * Destroy @chan's PGRAPH context, unloading it from the engine first
 * if it happens to be the one currently resident.
 */
static void
nv40_graph_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *grctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	/* Block PGRAPH fifo access while we fiddle with context state. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv04_graph_fifo_access(dev, false);

	/* Unload the context if it's the currently active one */
	if (nv40_graph_channel(dev) == chan)
		nv40_graph_unload_context(dev);

	nv04_graph_fifo_access(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Free the context resources */
	nouveau_gpuobj_ref(NULL, &grctx);
	chan->engctx[engine] = NULL;
}
/* Create a PGRAPH object of @class, fill in its instance memory, and
 * insert it into the channel's RAMHT under @handle. */
int
nv40_graph_object_new(struct nouveau_channel *chan, int engine,
		      u32 handle, u16 class)
{
	struct nouveau_gpuobj *gpuobj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(chan->dev, chan, 20, 16,
				 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
	if (ret)
		return ret;

	gpuobj->engine = 1;
	gpuobj->class = class;

	nv_wo32(gpuobj, 0x00, class);
	nv_wo32(gpuobj, 0x04, 0x00000000);
#ifndef __BIG_ENDIAN
	nv_wo32(gpuobj, 0x08, 0x00000000);
#else
	/* big-endian flag in the object header */
	nv_wo32(gpuobj, 0x08, 0x01000000);
#endif
	nv_wo32(gpuobj, 0x0c, 0x00000000);
	nv_wo32(gpuobj, 0x10, 0x00000000);

	/* RAMHT takes its own reference; drop ours either way. */
	ret = nouveau_ramht_insert(chan, handle, gpuobj);
	nouveau_gpuobj_ref(NULL, &gpuobj);
	return ret;
}
/*
 * Mirror tile region @i's configuration into PGRAPH.  Which register
 * set (and how many copies) must be written varies by chipset.
 */
static void
nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	switch (dev_priv->chipset) {
	case 0x40:
	case 0x41: /* guess */
	case 0x42:
	case 0x43:
	case 0x45: /* guess */
	case 0x4e:
		/* Two register banks: NV20-style plus the NV40 shadow. */
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	case 0x44:
	case 0x4a:
		/* Only the NV20-style bank on these chips. */
		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
		break;
	case 0x46:
	case 0x47:
	case 0x49:
	case 0x4b:
	case 0x4c:
	case 0x67:
	default:
		/* NV47-style bank plus the NV40 shadow. */
		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
		break;
	}
}
/*
* G70 0x47
* G71 0x49
* NV45 0x48
* G72[M] 0x46
* G73 0x4b
* C51_G7X 0x4c
* C51 0x4e
*/
/*
 * Bring up the PGRAPH engine: reset it via PMC, generate and upload the
 * context-switch microcode, program per-chipset debug/tiling registers,
 * and mirror the framebuffer configuration into PGRAPH.
 * Returns 0 on success or -ENOMEM if the ctxprog buffer can't be allocated.
 */
int
nv40_graph_init(struct drm_device *dev, int engine)
{
	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_grctx ctx = {};
	uint32_t vramsz, *cp;
	int i, j;

	/* Pulse PGRAPH through reset via the PMC enable register. */
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
			~NV_PMC_ENABLE_PGRAPH);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);

	/* Generate the ctxprog into a temporary buffer (max 256 words). */
	cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	ctx.dev = dev;
	ctx.mode = NOUVEAU_GRCTX_PROG;
	ctx.data = cp;
	ctx.ctxprog_max = 256;
	nv40_grctx_init(&ctx);
	/* Remember the per-channel context size for context_new(). */
	pgraph->grctx_size = ctx.ctxvals_pos * 4;

	/* Upload the generated ctxprog into the microcode store. */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
	for (i = 0; i < ctx.ctxprog_len; i++)
		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);

	kfree(cp);

	/* No context present currently */
	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);

	/* Ack any stale interrupts, then enable them all. */
	nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);

	/* Debug/state register defaults (values from hw traces). */
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);

	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
	nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);

	/* Write the index of 0x1540's lowest set bit to 0x405000.
	 * NOTE(review): exact semantics of these registers unverified. */
	j = nv_rd32(dev, 0x1540) & 0xff;
	if (j) {
		for (i = 0; !(j & 1); j >>= 1, i++)
			;
		nv_wr32(dev, 0x405000, i);
	}

	if (dev_priv->chipset == 0x40) {
		nv_wr32(dev, 0x4009b0, 0x83280fff);
		nv_wr32(dev, 0x4009b4, 0x000000a0);
	} else {
		nv_wr32(dev, 0x400820, 0x83280eff);
		nv_wr32(dev, 0x400824, 0x000000a0);
	}

	/* Per-chipset magic register values. */
	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
		nv_wr32(dev, 0x4009b8, 0x0078e366);
		nv_wr32(dev, 0x4009bc, 0x0000014c);
		break;
	case 0x41:
	case 0x42: /* pciid also 0x00Cx */
	/* case 0x0120: XXX (pciid) */
		nv_wr32(dev, 0x400828, 0x007596ff);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x43:
		nv_wr32(dev, 0x400828, 0x0072cb77);
		nv_wr32(dev, 0x40082c, 0x00000108);
		break;
	case 0x44:
	case 0x46: /* G72 */
	case 0x4a:
	case 0x4c: /* G7x-based C51 */
	case 0x4e:
		nv_wr32(dev, 0x400860, 0);
		nv_wr32(dev, 0x400864, 0);
		break;
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
		nv_wr32(dev, 0x400828, 0x07830610);
		nv_wr32(dev, 0x40082c, 0x0000016A);
		break;
	default:
		break;
	}

	nv_wr32(dev, 0x400b38, 0x2ffff800);
	nv_wr32(dev, 0x400b3c, 0x00006000);

	/* Tiling related stuff. */
	switch (dev_priv->chipset) {
	case 0x44:
	case 0x4a:
		nv_wr32(dev, 0x400bc4, 0x1003d888);
		nv_wr32(dev, 0x400bbc, 0xb7a7b500);
		break;
	case 0x46:
		nv_wr32(dev, 0x400bc4, 0x0000e024);
		nv_wr32(dev, 0x400bbc, 0xb7a7b520);
		break;
	case 0x4c:
	case 0x4e:
	case 0x67:
		nv_wr32(dev, 0x400bc4, 0x1003d888);
		nv_wr32(dev, 0x400bbc, 0xb7a7b540);
		break;
	default:
		break;
	}

	/* Turn all the tiling regions off. */
	for (i = 0; i < pfb->num_tiles; i++)
		nv40_graph_set_tile_region(dev, i);

	/* begin RAM config */
	vramsz = pci_resource_len(dev->pdev, 0) - 1;
	switch (dev_priv->chipset) {
	case 0x40:
		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400820, 0);
		nv_wr32(dev, 0x400824, 0);
		nv_wr32(dev, 0x400864, vramsz);
		nv_wr32(dev, 0x400868, vramsz);
		break;
	default:
		/* Non-NV40 chips mirror PFB config at chipset-dependent
		 * offsets. */
		switch (dev_priv->chipset) {
		case 0x41:
		case 0x42:
		case 0x43:
		case 0x45:
		case 0x4e:
		case 0x44:
		case 0x4a:
			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		default:
			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
			break;
		}
		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
		nv_wr32(dev, 0x400840, 0);
		nv_wr32(dev, 0x400844, 0);
		nv_wr32(dev, 0x4008A0, vramsz);
		nv_wr32(dev, 0x4008A4, vramsz);
		break;
	}

	return 0;
}
/* Engine shutdown hook: evict any resident channel context. */
static int
nv40_graph_fini(struct drm_device *dev, int engine)
{
	nv40_graph_unload_context(dev);
	return 0;
}
/* Map a trapped grctx instance address to a channel id for logging;
 * returns the channel count when no owner is found. */
static int
nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	int chid;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chid = 0; chid < dev_priv->engine.fifo.channels; chid++) {
		struct nouveau_channel *chan = dev_priv->channels.ptr[chid];
		struct nouveau_gpuobj *grctx;

		if (!chan)
			continue;

		grctx = chan->engctx[NVOBJ_ENGINE_GR];
		if (grctx && grctx->pinst == inst)
			break;
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	return chid;
}
/*
 * PGRAPH interrupt handler (hooked to IRQ source 12).  Drains all
 * pending bits from NV03_PGRAPH_INTR, letting registered software
 * method handlers consume illegal-method errors, and rate-limit logs
 * anything that remains unhandled.
 */
static void
nv40_graph_isr(struct drm_device *dev)
{
	u32 stat;

	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* Snapshot the trap state for decoding/logging. */
		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
		u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
		u32 chid = nv40_graph_isr_chid(dev, inst);
		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
		u32 subc = (addr & 0x00070000) >> 16;
		u32 mthd = (addr & 0x00001ffc);
		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
		u32 show = stat;

		if (stat & NV_PGRAPH_INTR_ERROR) {
			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
				/* A software-method handler may consume the
				 * error; suppress the log entry if it did. */
				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
					show &= ~NV_PGRAPH_INTR_ERROR;
			} else
			if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
				nv_mask(dev, 0x402000, 0, 0);
			}
		}

		/* Ack the interrupt and re-enable PGRAPH fifo access. */
		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);

		if (show && nouveau_ratelimit()) {
			NV_INFO(dev, "PGRAPH -");
			nouveau_bitfield_print(nv10_graph_intr, show);
			printk(" nsource:");
			nouveau_bitfield_print(nv04_graph_nsource, nsource);
			printk(" nstatus:");
			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
			printk("\n");
			NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
				chid, inst, subc, class, mthd, data);
		}
	}
}
/* Tear down the PGRAPH engine: unhook the IRQ, deregister the engine,
 * and free the engine state allocated in nv40_graph_create(). */
static void
nv40_graph_destroy(struct drm_device *dev, int engine)
{
	struct nv40_graph_engine *pgraph = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, 12);

	NVOBJ_ENGINE_DEL(dev, GR);
	kfree(pgraph);
}
/*
 * Register the NV40 PGRAPH engine with the driver core: allocate the
 * engine state, hook up the vfuncs and IRQ, and declare the object
 * classes this engine accepts.
 *
 * Fix vs. previous revision: class 0x506e (nvsw) was registered twice,
 * once at the top of the class list and again at the bottom; the
 * redundant first registration has been dropped (the one paired with
 * its page-flip method registration is kept).
 */
int
nv40_graph_create(struct drm_device *dev)
{
	struct nv40_graph_engine *pgraph;

	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
	if (!pgraph)
		return -ENOMEM;

	pgraph->base.destroy = nv40_graph_destroy;
	pgraph->base.init = nv40_graph_init;
	pgraph->base.fini = nv40_graph_fini;
	pgraph->base.context_new = nv40_graph_context_new;
	pgraph->base.context_del = nv40_graph_context_del;
	pgraph->base.object_new = nv40_graph_object_new;
	pgraph->base.set_tile_region = nv40_graph_set_tile_region;

	NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
	nouveau_irq_register(dev, 12, nv40_graph_isr);

	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */

	/* curie: NV44-family chips take the 0x4497 variant */
	if (nv44_graph_class(dev))
		NVOBJ_CLASS(dev, 0x4497, GR);
	else
		NVOBJ_CLASS(dev, 0x4097, GR);

	/* nvsw */
	NVOBJ_CLASS(dev, 0x506e, SW);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
	return 0;
}
| gpl-2.0 |
casper-astro/linux_devel | arch/mips/sgi-ip22/ip22-int.c | 2201 | 8678 | /*
* ip22-int.c: Routines for generic manipulation of the INT[23] ASIC
* found on INDY and Indigo2 workstations.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu)
* - Indigo2 changes
* - Interrupt handling fixes
* Copyright (C) 2001, 2003 Ladislav Michl (ladis@linux-mips.org)
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/ftrace.h>
#include <asm/irq_cpu.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
/* So far nothing hangs here */
#undef USE_LIO3_IRQ
/* INT[23] ASIC register block; mapped during platform setup. */
struct sgint_regs *sgint;

/* Priority-decode tables: map an 8-bit pending-interrupt mask to the
 * Linux IRQ number of its highest (most significant) set bit, one table
 * per local interrupt group.  Filled in by arch_init_irq(). */
static char lc0msk_to_irqnr[256];
static char lc1msk_to_irqnr[256];
static char lc2msk_to_irqnr[256];
static char lc3msk_to_irqnr[256];

extern int ip22_eisa_init(void);
static void enable_local0_irq(struct irq_data *d)
{
	/* don't allow mappable interrupt to be enabled from setup_irq,
	 * we have our own way to do so */
	if (d->irq == SGI_MAP_0_IRQ)
		return;

	sgint->imask0 |= 1 << (d->irq - SGINT_LOCAL0);
}

static void disable_local0_irq(struct irq_data *d)
{
	sgint->imask0 &= ~(1 << (d->irq - SGINT_LOCAL0));
}

static struct irq_chip ip22_local0_irq_type = {
	.name		= "IP22 local 0",
	.irq_mask	= disable_local0_irq,
	.irq_unmask	= enable_local0_irq,
};
static void enable_local1_irq(struct irq_data *d)
{
	/* don't allow mappable interrupt to be enabled from setup_irq,
	 * we have our own way to do so */
	if (d->irq == SGI_MAP_1_IRQ)
		return;

	sgint->imask1 |= 1 << (d->irq - SGINT_LOCAL1);
}

static void disable_local1_irq(struct irq_data *d)
{
	sgint->imask1 &= ~(1 << (d->irq - SGINT_LOCAL1));
}

static struct irq_chip ip22_local1_irq_type = {
	.name		= "IP22 local 1",
	.irq_mask	= disable_local1_irq,
	.irq_unmask	= enable_local1_irq,
};
/* local2 interrupts cascade through the local0 mappable line: unmasking
 * one also opens the cascade bit in imask0. */
static void enable_local2_irq(struct irq_data *d)
{
	sgint->imask0 |= (1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
	sgint->cmeimask0 |= (1 << (d->irq - SGINT_LOCAL2));
}

static void disable_local2_irq(struct irq_data *d)
{
	sgint->cmeimask0 &= ~(1 << (d->irq - SGINT_LOCAL2));
	/* close the cascade once no mappable interrupt remains enabled */
	if (!sgint->cmeimask0)
		sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
}

static struct irq_chip ip22_local2_irq_type = {
	.name		= "IP22 local 2",
	.irq_mask	= disable_local2_irq,
	.irq_unmask	= enable_local2_irq,
};
/* local3 interrupts cascade through the local1 mappable line, mirroring
 * the local2 handling above. */
static void enable_local3_irq(struct irq_data *d)
{
	sgint->imask1 |= (1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
	sgint->cmeimask1 |= (1 << (d->irq - SGINT_LOCAL3));
}

static void disable_local3_irq(struct irq_data *d)
{
	sgint->cmeimask1 &= ~(1 << (d->irq - SGINT_LOCAL3));
	/* close the cascade once no mappable interrupt remains enabled */
	if (!sgint->cmeimask1)
		sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
}

static struct irq_chip ip22_local3_irq_type = {
	.name		= "IP22 local 3",
	.irq_mask	= disable_local3_irq,
	.irq_unmask	= enable_local3_irq,
};
/* Decode and dispatch the highest-priority pending local0 interrupt,
 * following the LIO2 cascade to the mappable group when it is set. */
static void indy_local0_irqdispatch(void)
{
	u8 pend = sgint->istat0 & sgint->imask0;
	int irq;

	if (pend & SGINT_ISTAT0_LIO2) {
		u8 mappable = sgint->vmeistat & sgint->cmeimask0;
		irq = lc2msk_to_irqnr[mappable];
	} else {
		irq = lc0msk_to_irqnr[pend];
	}

	/* if irq == 0, then the interrupt has already been cleared */
	if (irq)
		do_IRQ(irq);
}
/* Decode and dispatch the highest-priority pending local1 interrupt,
 * following the LIO3 cascade to the mappable group when it is set. */
static void indy_local1_irqdispatch(void)
{
	u8 pend = sgint->istat1 & sgint->imask1;
	int irq;

	if (pend & SGINT_ISTAT1_LIO3) {
		u8 mappable = sgint->vmeistat & sgint->cmeimask1;
		irq = lc3msk_to_irqnr[mappable];
	} else {
		irq = lc1msk_to_irqnr[pend];
	}

	/* if irq == 0, then the interrupt has already been cleared */
	if (irq)
		do_IRQ(irq);
}
extern void ip22_be_interrupt(int irq);
/* Account and forward a bus-error interrupt to the IP22 BE handler,
 * bracketed by irq_enter/irq_exit since this is called directly from
 * plat_irq_dispatch rather than via do_IRQ(). */
static void __irq_entry indy_buserror_irq(void)
{
	int irq = SGI_BUSERR_IRQ;

	irq_enter();
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
	ip22_be_interrupt(irq);
	irq_exit();
}
/* Placeholder actions claiming the cascade and bus-error vectors; the
 * real decoding happens in the dispatch functions above, so no_action
 * suffices as a handler. */
static struct irqaction local0_cascade = {
	.handler	= no_action,
	.flags		= IRQF_DISABLED,
	.name		= "local0 cascade",
};

static struct irqaction local1_cascade = {
	.handler	= no_action,
	.flags		= IRQF_DISABLED,
	.name		= "local1 cascade",
};

static struct irqaction buserr = {
	.handler	= no_action,
	.flags		= IRQF_DISABLED,
	.name		= "Bus Error",
};

static struct irqaction map0_cascade = {
	.handler	= no_action,
	.flags		= IRQF_DISABLED,
	.name		= "mapable0 cascade",
};

#ifdef USE_LIO3_IRQ
static struct irqaction map1_cascade = {
	.handler	= no_action,
	.flags		= IRQF_DISABLED,
	.name		= "mapable1 cascade",
};
#define SGI_INTERRUPTS	SGINT_END
#else
/* LIO3 unused: don't set up irq chips beyond the local2 range. */
#define SGI_INTERRUPTS	SGINT_LOCAL3
#endif
extern void indy_8254timer_irq(void);
/*
* IRQs on the INDY look basically (barring software IRQs which we don't use
* at all) like:
*
* MIPS IRQ Source
* -------- ------
* 0 Software (ignored)
* 1 Software (ignored)
* 2 Local IRQ level zero
* 3 Local IRQ level one
* 4 8254 Timer zero
* 5 8254 Timer one
* 6 Bus Error
* 7 R4k timer (what we use)
*
* We handle the IRQ according to _our_ priority which is:
*
* Highest ---- R4k Timer
* Local IRQ zero
* Local IRQ one
* Bus Error
* 8254 Timer zero
* Lowest ---- 8254 Timer one
*
* then we just return, if multiple IRQs are pending then we will just take
* another exception, big deal.
*/
/* Top-level MIPS interrupt dispatch.  The else-if order below IS the
 * priority scheme described in the preceding comment block -- do not
 * reorder.  Only the single highest-priority pending source is handled
 * per exception; further pending sources simply re-trap. */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause();

	/*
	 * First we check for r4k counter/timer IRQ.
	 */
	if (pending & CAUSEF_IP7)
		do_IRQ(SGI_TIMER_IRQ);
	else if (pending & CAUSEF_IP2)
		indy_local0_irqdispatch();
	else if (pending & CAUSEF_IP3)
		indy_local1_irqdispatch();
	else if (pending & CAUSEF_IP6)
		indy_buserror_irq();
	else if (pending & (CAUSEF_IP4 | CAUSEF_IP5))
		indy_8254timer_irq();
}
/*
 * Platform interrupt setup: build the mask->irq priority tables, mask
 * everything in the INT[23] ASIC, register the irq chips for all local
 * interrupt lines, and claim the cascade/bus-error vectors.
 *
 * Improvement vs. previous revision: the mask->irq tables were built by
 * a 45-line if/else priority-encoder chain; it is replaced by a small
 * bit-scan loop with identical results (highest set bit wins, 0 maps
 * to "no interrupt").
 */
void __init arch_init_irq(void)
{
	int i;

	/* Init local mask --> irq tables: for each possible 8-bit pending
	 * mask, record the IRQ number of its most significant set bit. */
	for (i = 0; i < 256; i++) {
		int bit;

		/* find the highest set bit; -1 if the mask is empty */
		for (bit = 7; bit >= 0; bit--)
			if (i & (1 << bit))
				break;

		if (bit >= 0) {
			lc0msk_to_irqnr[i] = SGINT_LOCAL0 + bit;
			lc1msk_to_irqnr[i] = SGINT_LOCAL1 + bit;
			lc2msk_to_irqnr[i] = SGINT_LOCAL2 + bit;
			lc3msk_to_irqnr[i] = SGINT_LOCAL3 + bit;
		} else {
			/* 0 means "already cleared" to the dispatchers */
			lc0msk_to_irqnr[i] = 0;
			lc1msk_to_irqnr[i] = 0;
			lc2msk_to_irqnr[i] = 0;
			lc3msk_to_irqnr[i] = 0;
		}
	}

	/* Mask out all interrupts. */
	sgint->imask0 = 0;
	sgint->imask1 = 0;
	sgint->cmeimask0 = 0;
	sgint->cmeimask1 = 0;

	/* init CPU irqs */
	mips_cpu_irq_init();

	for (i = SGINT_LOCAL0; i < SGI_INTERRUPTS; i++) {
		struct irq_chip *handler;

		if (i < SGINT_LOCAL1)
			handler		= &ip22_local0_irq_type;
		else if (i < SGINT_LOCAL2)
			handler		= &ip22_local1_irq_type;
		else if (i < SGINT_LOCAL3)
			handler		= &ip22_local2_irq_type;
		else
			handler		= &ip22_local3_irq_type;

		irq_set_chip_and_handler(i, handler, handle_level_irq);
	}

	/* vector handler. this register the IRQ as non-sharable */
	setup_irq(SGI_LOCAL_0_IRQ, &local0_cascade);
	setup_irq(SGI_LOCAL_1_IRQ, &local1_cascade);
	setup_irq(SGI_BUSERR_IRQ, &buserr);

	/* cascade in cascade. i love Indy ;-) */
	setup_irq(SGI_MAP_0_IRQ, &map0_cascade);
#ifdef USE_LIO3_IRQ
	setup_irq(SGI_MAP_1_IRQ, &map1_cascade);
#endif

#ifdef CONFIG_EISA
	if (ip22_is_fullhouse())	/* Only Indigo-2 has EISA stuff */
		ip22_eisa_init();
#endif
}
| gpl-2.0 |
KlemensWinter/ecafe_kernel | arch/sh/kernel/cpu/sh4a/smp-shx3.c | 3737 | 4050 | /*
* SH-X3 SMP
*
* Copyright (C) 2007 - 2010 Paul Mundt
* Copyright (C) 2007 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/sections.h>
#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
#define STBCR_MSTP 0x00000001
#define STBCR_RESET 0x00000002
#define STBCR_SLEEP 0x00000004
#define STBCR_LTSLP 0x80000000
/*
 * Per-CPU IPI demultiplexer: acknowledge the message's bit in this
 * CPU's INTICICLR register and hand the message to the generic SMP
 * message code.  @arg carries the message number (set at request_irq).
 */
static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
{
	unsigned int message = (unsigned int)(long)arg;
	unsigned int cpu = hard_smp_processor_id();
	unsigned int offs = 4 * cpu;
	unsigned int x;

	/* each message occupies every 4th bit of CnINTICI */
	x = __raw_readl(0xfe410070 + offs); /* C0INITICI..CnINTICI */
	x &= (1 << (message << 2));
	__raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */

	smp_message_recv(message);

	return IRQ_HANDLED;
}
/*
 * Populate the possible-CPU map and the physical<->logical CPU tables.
 * There is no way to probe the real core count, so every CPU up to
 * NR_CPUS is marked possible.
 */
static void shx3_smp_setup(void)
{
	unsigned int cpu = 0;
	int i, num;

	init_cpu_possible(cpumask_of(cpu));

	/* Enable light sleep for the boot CPU */
	__raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));

	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;

	/*
	 * Do this stupidly for now.. we don't have an easy way to probe
	 * for the total number of cores.
	 */
	for (i = 1, num = 0; i < NR_CPUS; i++) {
		set_cpu_possible(i, true);
		__cpu_number_map[i] = ++num;
		__cpu_logical_map[num] = i;
	}

	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
static void shx3_prepare_cpus(unsigned int max_cpus)
{
int i;
local_timer_setup(0);
BUILD_BUG_ON(SMP_MSG_NR >= 8);
for (i = 0; i < SMP_MSG_NR; i++)
request_irq(104 + i, ipi_interrupt_handler,
IRQF_DISABLED | IRQF_PERCPU, "IPI", (void *)(long)i);
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
}
/*
 * Point @cpu's reset vector at @entry_point, make sure the core is held
 * in module-stop, then release it with a reset to start execution.
 */
static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point)
{
	/* 29-bit mode takes the virtual address directly */
	if (__in_29bit_mode())
		__raw_writel(entry_point, RESET_REG(cpu));
	else
		__raw_writel(virt_to_phys(entry_point), RESET_REG(cpu));

	/* ensure the CPU is in module-stop before resetting it */
	if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
		__raw_writel(STBCR_MSTP, STBCR_REG(cpu));

	while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
		cpu_relax();

	/* Start up secondary processor by sending a reset */
	__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
}
/* Read this CPU's hardware id from the CPIDR register. */
static unsigned int shx3_smp_processor_id(void)
{
	return __raw_readl(0xff000048); /* CPIDR */
}

/* Raise IPI @message on @cpu by setting its bit in the target's
 * CnINTICI register; only four cores are addressable. */
static void shx3_send_ipi(unsigned int cpu, unsigned int message)
{
	unsigned long addr = 0xfe410070 + (cpu * 4);

	BUG_ON(cpu >= 4);

	__raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */
}
static void shx3_update_boot_vector(unsigned int cpu)
{
__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
cpu_relax();
__raw_writel(STBCR_RESET, STBCR_REG(cpu));
}
/*
 * CPU hotplug notifier: re-arm a core's boot vector before it is
 * brought up again, and log when it comes online.
 */
static int __cpuinit
shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		shx3_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("CPU %u is now online\n", cpu);
		break;
	case CPU_DEAD:
		/* nothing to do; teardown handled by generic code */
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
	.notifier_call		= shx3_cpu_callback,
};

/* Hook the hotplug notifier once the notifier infrastructure is up. */
static int __cpuinit register_shx3_cpu_notifier(void)
{
	register_hotcpu_notifier(&shx3_cpu_notifier);
	return 0;
}
late_initcall(register_shx3_cpu_notifier);
/* SH-X3 implementation of the platform SMP operations; the cpu_die /
 * cpu_disable / play_dead hooks fall back to the generic natives. */
struct plat_smp_ops shx3_smp_ops = {
	.smp_setup		= shx3_smp_setup,
	.prepare_cpus		= shx3_prepare_cpus,
	.start_cpu		= shx3_start_cpu,
	.smp_processor_id	= shx3_smp_processor_id,
	.send_ipi		= shx3_send_ipi,
	.cpu_die		= native_cpu_die,
	.cpu_disable		= native_cpu_disable,
	.play_dead		= native_play_dead,
};
| gpl-2.0 |
garwynn/D710SPR_FL24_Kernel | arch/ia64/kernel/pci-swiotlb.c | 3993 | 1502 | /* Glue code to lib/swiotlb.c */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/machvec.h>
int swiotlb __read_mostly;
EXPORT_SYMBOL(swiotlb);
/* Coherent allocation wrapper: devices whose coherent mask is narrower
 * than 64 bits must allocate from the DMA zone. */
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t gfp)
{
	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
		gfp |= GFP_DMA;
	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}
/* swiotlb-backed DMA operations for ia64; everything delegates to the
 * generic swiotlb implementation except coherent allocation (wrapped
 * above to force GFP_DMA for narrow masks). */
struct dma_map_ops swiotlb_dma_ops = {
	.alloc_coherent = ia64_swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
/* Install swiotlb as the DMA backend unconditionally. */
void __init swiotlb_dma_init(void)
{
	dma_ops = &swiotlb_dma_ops;
	swiotlb_init(1);
}

/* Fall back to swiotlb when no hardware IOMMU was detected; only the
 * generic (machvec) kernel can do so -- other configurations require
 * an Intel IOMMU and panic without one. */
void __init pci_swiotlb_init(void)
{
	if (!iommu_detected) {
#ifdef CONFIG_IA64_GENERIC
		swiotlb = 1;
		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
		machvec_init("dig");
		swiotlb_init(1);
		dma_ops = &swiotlb_dma_ops;
#else
		panic("Unable to find Intel IOMMU");
#endif
	}
}
| gpl-2.0 |
CyanogenMod/android_kernel_oppo_n3 | arch/arm/mach-exynos/dev-dwmci.c | 4761 | 1902 | /*
* linux/arch/arm/mach-exynos4/dev-dwmci.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Platform device for Synopsys DesignWare Mobile Storage IP
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mmc/dw_mmc.h>
#include <plat/devs.h>
#include <mach/map.h>
/* Default slot callback: fixed 4-bit bus width for every slot. */
static int exynos4_dwmci_get_bus_wd(u32 slot_id)
{
	return 4;
}

/* Default slot init callback: nothing board-specific to do. */
static int exynos4_dwmci_init(u32 slot_id, irq_handler_t handler, void *data)
{
	return 0;
}
/* MMIO window and IRQ line of the DesignWare MMC controller. */
static struct resource exynos4_dwmci_resource[] = {
	[0] = DEFINE_RES_MEM(EXYNOS4_PA_DWMCI, SZ_4K),
	[1] = DEFINE_RES_IRQ(EXYNOS4_IRQ_DWMCI),
};

/* Default platform data; boards override fields via
 * exynos4_dwmci_set_platdata(). */
static struct dw_mci_board exynos4_dwci_pdata = {
	.num_slots		= 1,
	.quirks			= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	.bus_hz			= 80 * 1000 * 1000,
	.detect_delay_ms	= 200,
	.init			= exynos4_dwmci_init,
	.get_bus_wd		= exynos4_dwmci_get_bus_wd,
};
/* controller DMA is limited to 32-bit addresses */
static u64 exynos4_dwmci_dmamask = DMA_BIT_MASK(32);

/* Platform device for the Synopsys DesignWare MMC host ("dw_mmc"). */
struct platform_device exynos4_device_dwmci = {
	.name		= "dw_mmc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(exynos4_dwmci_resource),
	.resource	= exynos4_dwmci_resource,
	.dev		= {
		.dma_mask		= &exynos4_dwmci_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &exynos4_dwci_pdata,
	},
};
/* Install board-supplied platform data, falling back to the default
 * callbacks above for anything the board left NULL. */
void __init exynos4_dwmci_set_platdata(struct dw_mci_board *pd)
{
	struct dw_mci_board *npd;

	npd = s3c_set_platdata(pd, sizeof(struct dw_mci_board),
			&exynos4_device_dwmci);

	if (!npd->init)
		npd->init = exynos4_dwmci_init;
	if (!npd->get_bus_wd)
		npd->get_bus_wd = exynos4_dwmci_get_bus_wd;
}
| gpl-2.0 |
SlimRoms/kernel_asus_flo | drivers/net/wireless/ath/ath6kl/usb.c | 4761 | 10422 | /*
* Copyright (c) 2007-2011 Atheros Communications Inc.
* Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/usb.h>
#include "debug.h"
#include "core.h"
/* usb device object */
/* Per-device state for one ath6kl USB HIF instance. */
struct ath6kl_usb {
	struct usb_device *udev;		/* underlying USB device */
	struct usb_interface *interface;	/* interface we claimed */
	u8 *diag_cmd_buffer;	/* preallocated diag command scratch buffer */
	u8 *diag_resp_buffer;	/* preallocated diag response scratch buffer */
	struct ath6kl *ar;	/* back-pointer to the ath6kl core context */
};
/* diagnostic command defnitions */
#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1
#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2
#define ATH6KL_USB_CONTROL_REQ_DIAG_CMD 3
#define ATH6KL_USB_CONTROL_REQ_DIAG_RESP 4
#define ATH6KL_USB_CTRL_DIAG_CC_READ 0
#define ATH6KL_USB_CTRL_DIAG_CC_WRITE 1
/*
 * Little-endian wire format of a diag write command.  The trailing
 * _pad presumably keeps the command at a fixed size the firmware
 * expects (diag_read32 also sends this size) — TODO confirm.
 */
struct ath6kl_usb_ctrl_diag_cmd_write {
	__le32 cmd;
	__le32 address;
	__le32 value;
	__le32 _pad[1];
} __packed;

/* Wire format of a diag read command. */
struct ath6kl_usb_ctrl_diag_cmd_read {
	__le32 cmd;
	__le32 address;
} __packed;

/* Response carrying the 32-bit word returned by a diag read. */
struct ath6kl_usb_ctrl_diag_resp_read {
	__le32 value;
} __packed;
#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write))
#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read))
/*
 * Release an ath6kl_usb instance: detach it from the interface's
 * driver data, then free both diag scratch buffers and the container.
 */
static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
{
	usb_set_intfdata(ar_usb->interface, NULL);

	kfree(ar_usb->diag_resp_buffer);
	kfree(ar_usb->diag_cmd_buffer);
	kfree(ar_usb);
}
/*
 * Allocate and initialize the per-device ath6kl_usb state and attach
 * it to @interface.  Returns the new instance, or NULL on allocation
 * failure (all partial allocations are cleaned up).
 *
 * Fix: dropped the redundant memset() — kzalloc() already returns
 * zeroed memory.
 */
static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
{
	struct ath6kl_usb *ar_usb = NULL;
	struct usb_device *dev = interface_to_usbdev(interface);
	int status = 0;

	ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
	if (ar_usb == NULL)
		goto fail_ath6kl_usb_create;

	usb_set_intfdata(interface, ar_usb);
	ar_usb->udev = dev;
	ar_usb->interface = interface;

	ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL);
	if (ar_usb->diag_cmd_buffer == NULL) {
		status = -ENOMEM;
		goto fail_ath6kl_usb_create;
	}

	ar_usb->diag_resp_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_RESP,
					   GFP_KERNEL);
	if (ar_usb->diag_resp_buffer == NULL) {
		status = -ENOMEM;
		goto fail_ath6kl_usb_create;
	}

fail_ath6kl_usb_create:
	if (status != 0) {
		ath6kl_usb_destroy(ar_usb);
		ar_usb = NULL;
	}
	return ar_usb;
}
/*
 * Tear-down path shared with disconnect: stop tx/rx, clean up the
 * ath6kl core context, then free the per-device USB state.  A NULL
 * intfdata means probe never completed; nothing to do.
 */
static void ath6kl_usb_device_detached(struct usb_interface *interface)
{
	struct ath6kl_usb *ar_usb;

	ar_usb = usb_get_intfdata(interface);
	if (ar_usb == NULL)
		return;

	ath6kl_stop_txrx(ar_usb->ar);

	ath6kl_core_cleanup(ar_usb->ar);

	ath6kl_usb_destroy(ar_usb);
}
/*
 * Send a vendor-specific control OUT request.  @data is copied into a
 * freshly allocated bounce buffer before submission (usb_control_msg()
 * must not be given caller-owned storage such as stack memory).
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: the original logged usb_control_msg() failures but then
 * returned 0 unconditionally, hiding transfer errors from callers
 * (e.g. the BMI write path); the error is now propagated.
 */
static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
				      u8 req, u16 value, u16 index, void *data,
				      u32 size)
{
	u8 *buf = NULL;
	int ret;

	if (size > 0) {
		buf = kmalloc(size, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;

		memcpy(buf, data, size);
	}

	/* note: if successful returns number of bytes transfered */
	ret = usb_control_msg(ar_usb->udev,
			      usb_sndctrlpipe(ar_usb->udev, 0),
			      req,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_DEVICE, value, index, buf,
			      size, 1000);

	if (ret < 0) {
		ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n",
			   __func__, ret);
		kfree(buf);
		return ret;
	}

	kfree(buf);

	return 0;
}
/*
 * Issue a vendor-specific control IN request and copy the response
 * into @data.  A bounce buffer is used for the transfer itself.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes: (1) the original returned 0 even when usb_control_msg()
 * failed, and (2) it memcpy'd the bounce buffer back to the caller on
 * the failure path as well, handing back uninitialized heap bytes.
 * Errors are now propagated and the copy happens only on success.
 */
static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
				     u8 req, u16 value, u16 index, void *data,
				     u32 size)
{
	u8 *buf = NULL;
	int ret;

	if (size > 0) {
		buf = kmalloc(size, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
	}

	/* note: if successful returns number of bytes transfered */
	ret = usb_control_msg(ar_usb->udev,
			      usb_rcvctrlpipe(ar_usb->udev, 0),
			      req,
			      USB_DIR_IN | USB_TYPE_VENDOR |
			      USB_RECIP_DEVICE, value, index, buf,
			      size, 2 * HZ);

	if (ret < 0) {
		ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n",
			   __func__, ret);
		kfree(buf);
		return ret;
	}

	/* copy out only after a successful transfer */
	memcpy((u8 *) data, buf, size);

	kfree(buf);

	return 0;
}
/*
 * Run one command/response cycle on the control pipe: send the request
 * and, when @resp_buf is non-NULL, fetch the expected response into it
 * (*resp_len gives its capacity).  Returns 0 on success or the first
 * failing submit's error code.
 */
static int ath6kl_usb_ctrl_msg_exchange(struct ath6kl_usb *ar_usb,
					u8 req_val, u8 *req_buf, u32 req_len,
					u8 resp_val, u8 *resp_buf, u32 *resp_len)
{
	int status;

	/* push the command out first */
	status = ath6kl_usb_submit_ctrl_out(ar_usb, req_val, 0, 0,
					    req_buf, req_len);

	/* stop here on error, or when the caller expects no response */
	if (status != 0 || resp_buf == NULL)
		return status;

	/* command accepted; collect the response */
	return ath6kl_usb_submit_ctrl_in(ar_usb, resp_val, 0, 0,
					 resp_buf, *resp_len);
}
/*
 * Read a 32-bit word from target address space via the diag control
 * interface.  The value is returned host-endian through @data.
 *
 * Fix: cmd->cmd is a __le32 wire field, but the original stored the
 * host-order constant directly (it only worked because
 * ATH6KL_USB_CTRL_DIAG_CC_READ happens to be 0); now converted
 * explicitly, consistent with ath6kl_usb_diag_write32().
 */
static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
{
	struct ath6kl_usb *ar_usb = ar->hif_priv;
	struct ath6kl_usb_ctrl_diag_resp_read *resp;
	struct ath6kl_usb_ctrl_diag_cmd_read *cmd;
	u32 resp_len;
	int ret;

	cmd = (struct ath6kl_usb_ctrl_diag_cmd_read *) ar_usb->diag_cmd_buffer;

	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd = cpu_to_le32(ATH6KL_USB_CTRL_DIAG_CC_READ);
	cmd->address = cpu_to_le32(address);
	resp_len = sizeof(*resp);

	/*
	 * NOTE(review): the request length is the larger *write* command
	 * size — apparently the firmware expects a fixed-size diag
	 * command; confirm before shrinking this to sizeof(*cmd).
	 */
	ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
				ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
				(u8 *) cmd,
				sizeof(struct ath6kl_usb_ctrl_diag_cmd_write),
				ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
				ar_usb->diag_resp_buffer, &resp_len);

	if (ret)
		return ret;

	resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
		ar_usb->diag_resp_buffer;

	*data = le32_to_cpu(resp->value);

	return ret;
}
/*
 * Write a 32-bit word to target address space via the diag control
 * interface.  @data arrives already little-endian (__le32), so it is
 * stored as-is; no response is requested from the device.
 */
static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
{
	struct ath6kl_usb *ar_usb = ar->hif_priv;
	struct ath6kl_usb_ctrl_diag_cmd_write *cmd;

	cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;

	memset(cmd, 0, sizeof(struct ath6kl_usb_ctrl_diag_cmd_write));
	cmd->cmd = cpu_to_le32(ATH6KL_USB_CTRL_DIAG_CC_WRITE);
	cmd->address = cpu_to_le32(address);
	cmd->value = data;

	return ath6kl_usb_ctrl_msg_exchange(ar_usb,
					    ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
					    (u8 *) cmd,
					    sizeof(*cmd),
					    0, NULL, NULL);
}
/*
 * Fetch a BMI response of @len bytes from the device over the vendor
 * control pipe.  Returns 0 on success or the submit error.
 */
static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
{
	struct ath6kl_usb *ar_usb = ar->hif_priv;
	int ret;

	ret = ath6kl_usb_submit_ctrl_in(ar_usb,
					ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
					0, 0, buf, len);
	if (ret != 0)
		ath6kl_err("Unable to read the bmi data from the device: %d\n",
			   ret);

	return ret;
}
/*
 * Push a BMI command of @len bytes to the device over the vendor
 * control pipe.  Returns 0 on success or the submit error.
 */
static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
{
	struct ath6kl_usb *ar_usb = ar->hif_priv;
	int ret;

	ret = ath6kl_usb_submit_ctrl_out(ar_usb,
					 ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
					 0, 0, buf, len);
	if (ret != 0)
		ath6kl_err("unable to send the bmi data to the device: %d\n",
			   ret);

	return ret;
}
/* No-op power hooks for the USB HIF; both always report success. */
static int ath6kl_usb_power_on(struct ath6kl *ar)
{
	return 0;
}

static int ath6kl_usb_power_off(struct ath6kl *ar)
{
	return 0;
}
/* HIF operations exported to the ath6kl core for the USB transport. */
static const struct ath6kl_hif_ops ath6kl_usb_ops = {
	.diag_read32 = ath6kl_usb_diag_read32,
	.diag_write32 = ath6kl_usb_diag_write32,
	.bmi_read = ath6kl_usb_bmi_read,
	.bmi_write = ath6kl_usb_bmi_write,
	.power_on = ath6kl_usb_power_on,
	.power_off = ath6kl_usb_power_off,
};
/* ath6kl usb driver registered functions */
/*
 * USB probe callback: creates one ath6kl core instance per claimed
 * interface.  The device reference taken here is held until
 * ath6kl_usb_remove(); each error path unwinds in reverse order via
 * the goto ladder at the bottom.
 */
static int ath6kl_usb_probe(struct usb_interface *interface,
			    const struct usb_device_id *id)
{
	struct usb_device *dev = interface_to_usbdev(interface);
	struct ath6kl *ar;
	struct ath6kl_usb *ar_usb = NULL;
	int vendor_id, product_id;
	int ret = 0;

	usb_get_dev(dev);

	vendor_id = le16_to_cpu(dev->descriptor.idVendor);
	product_id = le16_to_cpu(dev->descriptor.idProduct);

	ath6kl_dbg(ATH6KL_DBG_USB, "vendor_id = %04x\n", vendor_id);
	ath6kl_dbg(ATH6KL_DBG_USB, "product_id = %04x\n", product_id);

	if (interface->cur_altsetting)
		ath6kl_dbg(ATH6KL_DBG_USB, "USB Interface %d\n",
			   interface->cur_altsetting->desc.bInterfaceNumber);

	if (dev->speed == USB_SPEED_HIGH)
		ath6kl_dbg(ATH6KL_DBG_USB, "USB 2.0 Host\n");
	else
		ath6kl_dbg(ATH6KL_DBG_USB, "USB 1.1 Host\n");

	ar_usb = ath6kl_usb_create(interface);
	if (ar_usb == NULL) {
		ret = -ENOMEM;
		goto err_usb_put;
	}

	ar = ath6kl_core_create(&ar_usb->udev->dev);
	if (ar == NULL) {
		ath6kl_err("Failed to alloc ath6kl core\n");
		ret = -ENOMEM;
		goto err_usb_destroy;
	}

	/* wire the USB transport into the core */
	ar->hif_priv = ar_usb;
	ar->hif_type = ATH6KL_HIF_TYPE_USB;
	ar->hif_ops = &ath6kl_usb_ops;
	/* transfer sizing; values as shipped — confirm against fw docs */
	ar->mbox_info.block_size = 16;
	ar->bmi.max_data_size = 252;

	ar_usb->ar = ar;

	ret = ath6kl_core_init(ar);
	if (ret) {
		ath6kl_err("Failed to init ath6kl core: %d\n", ret);
		goto err_core_free;
	}

	return ret;

err_core_free:
	ath6kl_core_destroy(ar);
err_usb_destroy:
	ath6kl_usb_destroy(ar_usb);
err_usb_put:
	usb_put_dev(dev);

	return ret;
}
/*
 * USB disconnect callback: drop the reference taken in probe, then
 * tear everything down via the detach helper.
 */
static void ath6kl_usb_remove(struct usb_interface *interface)
{
	usb_put_dev(interface_to_usbdev(interface));
	ath6kl_usb_device_detached(interface);
}
/* table of devices that work with this driver */
/* table of devices that work with this driver */
static struct usb_device_id ath6kl_usb_ids[] = {
	{USB_DEVICE(0x0cf3, 0x9374)},	/* 0x0cf3 = Atheros vendor id */
	{ /* Terminating entry */ },
};
MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
/* USB driver registration record; bound by the id table above. */
static struct usb_driver ath6kl_usb_driver = {
	.name = "ath6kl_usb",
	.probe = ath6kl_usb_probe,
	.disconnect = ath6kl_usb_remove,
	.id_table = ath6kl_usb_ids,
};
/*
 * Module init: register the USB driver with the USB core.
 *
 * Fix: the original ignored usb_register()'s return value and always
 * returned 0, so the module would stay loaded but non-functional if
 * registration failed.  Propagate the error instead.
 */
static int ath6kl_usb_init(void)
{
	return usb_register(&ath6kl_usb_driver);
}
/* Module exit: unregister the USB driver. */
static void ath6kl_usb_exit(void)
{
	usb_deregister(&ath6kl_usb_driver);
}
module_init(ath6kl_usb_init);
module_exit(ath6kl_usb_exit);

/* Module metadata and the AR6004 firmware images requested at runtime. */
MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
| gpl-2.0 |
zlaja/android_kernel_lge_msm8610 | arch/x86/kernel/apic/ipi.c | 8089 | 3836 | #include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
unsigned long query_cpu;
unsigned long flags;
/*
* Hack. The clustered APIC addressing mode doesn't allow us to send
* to an arbitrary mask, so I do a unicast to each CPU instead.
* - mbligh
*/
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
query_cpu), vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
}
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int self = smp_processor_id();
	unsigned int cpu;
	unsigned long flags;

	/* See Hack comment above */
	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		if (cpu == self)
			continue;	/* never IPI ourselves */
		__default_send_IPI_dest_field(
			per_cpu(x86_cpu_to_apicid, cpu), vector,
			APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
#ifdef CONFIG_X86_32
/* Unicast @vector to every CPU in @mask using logical destinations. */
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}
/* As above, but skip the sending CPU itself. */
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}
/*
* This is only used on smaller machines.
*/
/*
 * This is only used on smaller machines.
 * Sends @vector in one shot to the logical-mode destination bitmap
 * formed from the first word of @cpumask; warns on an empty mask or
 * on bits outside the online map.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (WARN_ONCE(!mask, "empty IPI mask"))
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}
void default_send_IPI_allbutself(int vector)
{
	/*
	 * With only one online CPU a broadcast would raise an APIC send
	 * error, so skip sending IPIs entirely in that case.
	 */
	if (num_online_cpus() <= 1)
		return;

	__default_local_send_IPI_allbutself(vector);
}
/* Broadcast @vector to all CPUs including the sender. */
void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}
/* Send @vector to the current CPU only, via the SELF shorthand. */
void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}
/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
int i;
for_each_possible_cpu(i) {
if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
return i;
}
return -1;
}
/*
 * Best-effort processor id lookup via the hardware APIC id; falls back
 * to 0 whenever the APIC is unusable or the id cannot be resolved.
 */
int safe_smp_processor_id(void)
{
	int apicid, cpu;

	if (!cpu_has_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpu = convert_apicid_to_cpu(apicid);
	if (cpu < 0)
		return 0;

	return cpu;
}
#endif
| gpl-2.0 |
android-legacy/kernel | drivers/mtd/maps/pmcmsp-flash.c | 12185 | 6243 | /*
* Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
* Config with both CFI and JEDEC device support.
*
* Basically physmap.c with the addition of partitions and
* an array of mapping info to accommodate more than one flash type per board.
*
* Copyright 2005-2007 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <msp_prom.h>
#include <msp_regs.h>
static struct mtd_info **msp_flash;
static struct mtd_partition **msp_parts;
static struct map_info *msp_maps;
static int fcnt;
#define DEBUG_MARKER printk(KERN_NOTICE "%s[%d]\n", __func__, __LINE__)
/*
 * Probe the flash devices described by the PROM environment
 * ("flashN" = "<addr>:<size>", "flashN_M" = "<offset>:<size>:<name>")
 * and register each one, with its partitions, with the MTD core.
 * Returns 0 on success or a negative errno.
 *
 * Fix: msp_maps was allocated with sizeof(struct mtd_info), which is
 * considerably larger than the struct map_info elements it actually
 * holds.  All three bookkeeping arrays now use the sizeof(*ptr)
 * idiom so the element size cannot drift from the declarations.
 */
static int __init init_msp_flash(void)
{
	int i, j, ret = -ENOMEM;
	int offset, coff;
	char *env;
	int pcnt;
	char flash_name[] = "flash0";
	char part_name[] = "flash0_0";
	unsigned addr, size;

	/* If ELB is disabled by "ful-mux" mode, we can't get at flash */
	if ((*DEV_ID_REG & DEV_ID_SINGLE_PC) &&
	    (*ELB_1PC_EN_REG & SINGLE_PCCARD)) {
		printk(KERN_NOTICE "Single PC Card mode: no flash access\n");
		return -ENXIO;
	}

	/* examine the prom environment for flash devices */
	for (fcnt = 0; (env = prom_getenv(flash_name)); fcnt++)
		flash_name[5] = '0' + fcnt + 1;

	if (fcnt < 1)
		return -ENXIO;

	printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt);

	msp_flash = kmalloc(fcnt * sizeof(*msp_flash), GFP_KERNEL);
	if (!msp_flash)
		return -ENOMEM;

	msp_parts = kmalloc(fcnt * sizeof(*msp_parts), GFP_KERNEL);
	if (!msp_parts)
		goto free_msp_flash;

	msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL);
	if (!msp_maps)
		goto free_msp_parts;

	/* loop over the flash devices, initializing each */
	for (i = 0; i < fcnt; i++) {
		/* examine the prom environment for flash partitions */
		part_name[5] = '0' + i;
		part_name[7] = '0';
		for (pcnt = 0; (env = prom_getenv(part_name)); pcnt++)
			part_name[7] = '0' + pcnt + 1;

		if (pcnt == 0) {
			printk(KERN_NOTICE "Skipping flash device %d "
				"(no partitions defined)\n", i);
			continue;
		}

		msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition),
				       GFP_KERNEL);
		if (!msp_parts[i])
			goto cleanup_loop;

		/* now initialize the devices proper */
		flash_name[5] = '0' + i;
		env = prom_getenv(flash_name);

		if (sscanf(env, "%x:%x", &addr, &size) < 2) {
			ret = -ENXIO;
			kfree(msp_parts[i]);
			goto cleanup_loop;
		}
		addr = CPHYSADDR(addr);

		printk(KERN_NOTICE
			"MSP flash device \"%s\": 0x%08x at 0x%08x\n",
			flash_name, size, addr);
		/* This must match the actual size of the flash chip */
		msp_maps[i].size = size;
		msp_maps[i].phys = addr;

		/*
		 * Platforms have a specific limit of the size of memory
		 * which may be mapped for flash:
		 */
		if (size > CONFIG_MSP_FLASH_MAP_LIMIT)
			size = CONFIG_MSP_FLASH_MAP_LIMIT;

		msp_maps[i].virt = ioremap(addr, size);
		if (msp_maps[i].virt == NULL) {
			ret = -ENXIO;
			kfree(msp_parts[i]);
			goto cleanup_loop;
		}

		msp_maps[i].bankwidth = 1;
		msp_maps[i].name = kmalloc(7, GFP_KERNEL);
		if (!msp_maps[i].name) {
			iounmap(msp_maps[i].virt);
			kfree(msp_parts[i]);
			goto cleanup_loop;
		}

		msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);

		/* parse each "flashN_M" entry into an mtd_partition */
		for (j = 0; j < pcnt; j++) {
			part_name[5] = '0' + i;
			part_name[7] = '0' + j;

			env = prom_getenv(part_name);

			if (sscanf(env, "%x:%x:%n", &offset, &size,
				   &coff) < 2) {
				ret = -ENXIO;
				kfree(msp_maps[i].name);
				iounmap(msp_maps[i].virt);
				kfree(msp_parts[i]);
				goto cleanup_loop;
			}

			msp_parts[i][j].size = size;
			msp_parts[i][j].offset = offset;
			/* name points into the PROM env string after ':' */
			msp_parts[i][j].name = env + coff;
		}

		/* now probe and add the device */
		simple_map_init(&msp_maps[i]);
		msp_flash[i] = do_map_probe("cfi_probe", &msp_maps[i]);
		if (msp_flash[i]) {
			msp_flash[i]->owner = THIS_MODULE;
			mtd_device_register(msp_flash[i], msp_parts[i], pcnt);
		} else {
			printk(KERN_ERR "map probe failed for flash\n");
			ret = -ENXIO;
			kfree(msp_maps[i].name);
			iounmap(msp_maps[i].virt);
			kfree(msp_parts[i]);
			goto cleanup_loop;
		}
	}

	return 0;

cleanup_loop:
	/*
	 * NOTE(review): devices skipped via "continue" above leave their
	 * slots untouched; this unwind assumes every earlier index was
	 * fully initialized (pre-existing behavior, kept as-is).
	 */
	while (i--) {
		mtd_device_unregister(msp_flash[i]);
		map_destroy(msp_flash[i]);
		kfree(msp_maps[i].name);
		iounmap(msp_maps[i].virt);
		kfree(msp_parts[i]);
	}
	kfree(msp_maps);
free_msp_parts:
	kfree(msp_parts);
free_msp_flash:
	kfree(msp_flash);
	return ret;
}
/*
 * Module exit: unregister and release every flash device probed by
 * init_msp_flash(), then drop the top-level tracking arrays.
 */
static void __exit cleanup_msp_flash(void)
{
	int i;

	for (i = 0; i < fcnt; i++) {
		mtd_device_unregister(msp_flash[i]);
		map_destroy(msp_flash[i]);
		iounmap((void *)msp_maps[i].virt);

		/* free the memory */
		kfree(msp_parts[i]);
		kfree(msp_maps[i].name);
	}

	kfree(msp_maps);
	kfree(msp_parts);
	kfree(msp_flash);
}
MODULE_AUTHOR("PMC-Sierra, Inc");
MODULE_DESCRIPTION("MTD map driver for PMC-Sierra MSP boards");
MODULE_LICENSE("GPL");
module_init(init_msp_flash);
module_exit(cleanup_msp_flash);
| gpl-2.0 |
AriesVE-DevCon-TEAM/samsung-kernel-msm7x30 | sound/isa/gus/gus_reset.c | 14745 | 13408 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <sound/core.h>
#include <sound/gus.h>
extern void snd_gf1_timers_init(struct snd_gus_card * gus);
extern void snd_gf1_timers_done(struct snd_gus_card * gus);
extern int snd_gf1_synth_init(struct snd_gus_card * gus);
extern void snd_gf1_synth_done(struct snd_gus_card * gus);
/*
* ok.. default interrupt handlers...
*/
/*
 * Default interrupt handlers: each one clears/masks the raising source
 * (register semantics per the GF1 programming docs — not re-verified
 * here) so the interrupt cannot storm before a real handler is set.
 */
static void snd_gf1_default_interrupt_handler_midi_out(struct snd_gus_card * gus)
{
	snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd &= ~0x20);
}

static void snd_gf1_default_interrupt_handler_midi_in(struct snd_gus_card * gus)
{
	snd_gf1_uart_cmd(gus, gus->gf1.uart_cmd &= ~0x80);
}

static void snd_gf1_default_interrupt_handler_timer1(struct snd_gus_card * gus)
{
	snd_gf1_i_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, gus->gf1.timer_enabled &= ~4);
}

static void snd_gf1_default_interrupt_handler_timer2(struct snd_gus_card * gus)
{
	snd_gf1_i_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, gus->gf1.timer_enabled &= ~8);
}

static void snd_gf1_default_interrupt_handler_wave_and_volume(struct snd_gus_card * gus, struct snd_gus_voice * voice)
{
	snd_gf1_i_ctrl_stop(gus, 0x00);
	snd_gf1_i_ctrl_stop(gus, 0x0d);
}

static void snd_gf1_default_interrupt_handler_dma_write(struct snd_gus_card * gus)
{
	snd_gf1_i_write8(gus, 0x41, 0x00);	/* DRAM DMA control */
}

static void snd_gf1_default_interrupt_handler_dma_read(struct snd_gus_card * gus)
{
	snd_gf1_i_write8(gus, 0x49, 0x00);	/* sampling control */
}
/*
 * Install the default (source-quiescing) handlers for the interrupt
 * sources selected in @what.  For SNDRV_GF1_HANDLER_VOICE the low 16
 * bits of @what select the voice whose wave/volume handlers are reset.
 */
void snd_gf1_set_default_handlers(struct snd_gus_card * gus, unsigned int what)
{
	if (what & SNDRV_GF1_HANDLER_MIDI_OUT)
		gus->gf1.interrupt_handler_midi_out = snd_gf1_default_interrupt_handler_midi_out;
	if (what & SNDRV_GF1_HANDLER_MIDI_IN)
		gus->gf1.interrupt_handler_midi_in = snd_gf1_default_interrupt_handler_midi_in;
	if (what & SNDRV_GF1_HANDLER_TIMER1)
		gus->gf1.interrupt_handler_timer1 = snd_gf1_default_interrupt_handler_timer1;
	if (what & SNDRV_GF1_HANDLER_TIMER2)
		gus->gf1.interrupt_handler_timer2 = snd_gf1_default_interrupt_handler_timer2;
	if (what & SNDRV_GF1_HANDLER_VOICE) {
		struct snd_gus_voice *voice;

		voice = &gus->gf1.voices[what & 0xffff];
		voice->handler_wave =
		voice->handler_volume = snd_gf1_default_interrupt_handler_wave_and_volume;
		voice->handler_effect = NULL;
		voice->volume_change = NULL;
	}
	if (what & SNDRV_GF1_HANDLER_DMA_WRITE)
		gus->gf1.interrupt_handler_dma_write = snd_gf1_default_interrupt_handler_dma_write;
	if (what & SNDRV_GF1_HANDLER_DMA_READ)
		gus->gf1.interrupt_handler_dma_read = snd_gf1_default_interrupt_handler_dma_read;
}
/*
*/
/*
 * Quiesce the GF1 interrupt-generating registers (DMA, timer,
 * sampling) under the register lock; the IRQSTAT read acknowledges
 * any pending status.
 */
static void snd_gf1_clear_regs(struct snd_gus_card * gus)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	inb(GUSP(gus, IRQSTAT));
	snd_gf1_write8(gus, 0x41, 0);	/* DRAM DMA Control Register */
	snd_gf1_write8(gus, 0x45, 0);	/* Timer Control */
	snd_gf1_write8(gus, 0x49, 0);	/* Sampling Control Register */
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}
/*
 * Read (and thereby acknowledge/clear) the GF1 status registers
 * without modifying them; used around hardware (re)initialization.
 */
static void snd_gf1_look_regs(struct snd_gus_card * gus)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	snd_gf1_look8(gus, 0x41);	/* DRAM DMA Control Register */
	snd_gf1_look8(gus, 0x49);	/* Sampling Control Register */
	inb(GUSP(gus, IRQSTAT));
	snd_gf1_read8(gus, 0x0f);	/* IRQ Source Register */
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}
/*
* put selected GF1 voices to initial stage...
*/
/*
 * Stop a voice's address and volume-ramp engines without touching any
 * other voice state (contrast snd_gf1_stop_voice(), which also clears
 * the accumulator in enhanced mode).
 */
void snd_gf1_smart_stop_voice(struct snd_gus_card * gus, unsigned short voice)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	snd_gf1_select_voice(gus, voice);
#if 0
	printk(KERN_DEBUG " -%i- smart stop voice - volume = 0x%x\n", voice, snd_gf1_i_read16(gus, SNDRV_GF1_VW_VOLUME));
#endif
	snd_gf1_ctrl_stop(gus, SNDRV_GF1_VB_ADDRESS_CONTROL);
	snd_gf1_ctrl_stop(gus, SNDRV_GF1_VB_VOLUME_CONTROL);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}
/*
 * Fully stop a voice: halt the address and volume-ramp engines and, in
 * enhanced (InterWave) mode, also zero the voice accumulator.
 */
void snd_gf1_stop_voice(struct snd_gus_card * gus, unsigned short voice)
{
	unsigned long flags;

	spin_lock_irqsave(&gus->reg_lock, flags);
	snd_gf1_select_voice(gus, voice);
#if 0
	printk(KERN_DEBUG " -%i- stop voice - volume = 0x%x\n", voice, snd_gf1_i_read16(gus, SNDRV_GF1_VW_VOLUME));
#endif
	snd_gf1_ctrl_stop(gus, SNDRV_GF1_VB_ADDRESS_CONTROL);
	snd_gf1_ctrl_stop(gus, SNDRV_GF1_VB_VOLUME_CONTROL);
	if (gus->gf1.enh_mode)
		snd_gf1_write8(gus, SNDRV_GF1_VB_ACCUMULATOR, 0);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
#if 0
	snd_gf1_lfo_shutdown(gus, voice, ULTRA_LFO_VIBRATO);
	snd_gf1_lfo_shutdown(gus, voice, ULTRA_LFO_TREMOLO);
#endif
}
/*
 * Reset voices v_min..v_max (inclusive) to a silent initial state:
 * engines stopped, addresses parked at the default voice address,
 * volume and ramp registers zeroed, pan centered.  Enhanced-mode-only
 * registers are additionally cleared when enh_mode is set.
 */
static void snd_gf1_clear_voices(struct snd_gus_card * gus, unsigned short v_min,
				 unsigned short v_max)
{
	unsigned long flags;
	unsigned int daddr;
	unsigned short i, w_16;

	daddr = gus->gf1.default_voice_address << 4;
	for (i = v_min; i <= v_max; i++) {
#if 0
		if (gus->gf1.syn_voices)
			gus->gf1.syn_voices[i].flags = ~VFLG_DYNAMIC;
#endif
		spin_lock_irqsave(&gus->reg_lock, flags);
		snd_gf1_select_voice(gus, i);
		snd_gf1_ctrl_stop(gus, SNDRV_GF1_VB_ADDRESS_CONTROL);	/* Voice Control Register = voice stop */
		snd_gf1_ctrl_stop(gus, SNDRV_GF1_VB_VOLUME_CONTROL);	/* Volume Ramp Control Register = ramp off */
		if (gus->gf1.enh_mode)
			snd_gf1_write8(gus, SNDRV_GF1_VB_MODE, gus->gf1.memory ? 0x02 : 0x82);	/* Deactivate voice */
		/* preserve the voice's 8/16-bit addressing mode bit */
		w_16 = snd_gf1_read8(gus, SNDRV_GF1_VB_ADDRESS_CONTROL) & 0x04;
		snd_gf1_write16(gus, SNDRV_GF1_VW_FREQUENCY, 0x400);
		snd_gf1_write_addr(gus, SNDRV_GF1_VA_START, daddr, w_16);
		snd_gf1_write_addr(gus, SNDRV_GF1_VA_END, daddr, w_16);
		snd_gf1_write8(gus, SNDRV_GF1_VB_VOLUME_START, 0);
		snd_gf1_write8(gus, SNDRV_GF1_VB_VOLUME_END, 0);
		snd_gf1_write8(gus, SNDRV_GF1_VB_VOLUME_RATE, 0);
		snd_gf1_write16(gus, SNDRV_GF1_VW_VOLUME, 0);
		snd_gf1_write_addr(gus, SNDRV_GF1_VA_CURRENT, daddr, w_16);
		snd_gf1_write8(gus, SNDRV_GF1_VB_PAN, 7);	/* center */
		if (gus->gf1.enh_mode) {
			snd_gf1_write8(gus, SNDRV_GF1_VB_ACCUMULATOR, 0);
			snd_gf1_write16(gus, SNDRV_GF1_VW_EFFECT_VOLUME, 0);
			snd_gf1_write16(gus, SNDRV_GF1_VW_EFFECT_VOLUME_FINAL, 0);
		}
		spin_unlock_irqrestore(&gus->reg_lock, flags);
#if 0
		snd_gf1_lfo_shutdown(gus, i, ULTRA_LFO_VIBRATO);
		snd_gf1_lfo_shutdown(gus, i, ULTRA_LFO_TREMOLO);
#endif
	}
}
/*
 * Stop voices v_min..v_max.  Outside interrupt context the voices are
 * first ramped down toward SNDRV_GF1_MIN_OFFSET (with a short sleep to
 * let the ramps run) to avoid audible clicks; then every voice is hard
 * reset via snd_gf1_clear_voices().
 */
void snd_gf1_stop_voices(struct snd_gus_card * gus, unsigned short v_min, unsigned short v_max)
{
	unsigned long flags;
	short i, ramp_ok;
	unsigned short ramp_end;

	if (!in_interrupt()) {	/* this can't be done in interrupt */
		for (i = v_min, ramp_ok = 0; i <= v_max; i++) {
			spin_lock_irqsave(&gus->reg_lock, flags);
			snd_gf1_select_voice(gus, i);
			ramp_end = snd_gf1_read16(gus, 9) >> 8;
			if (ramp_end > SNDRV_GF1_MIN_OFFSET) {
				ramp_ok++;
				snd_gf1_write8(gus, SNDRV_GF1_VB_VOLUME_RATE, 20);	/* ramp rate */
				snd_gf1_write8(gus, SNDRV_GF1_VB_VOLUME_START, SNDRV_GF1_MIN_OFFSET);	/* ramp start */
				snd_gf1_write8(gus, SNDRV_GF1_VB_VOLUME_END, ramp_end);	/* ramp end */
				snd_gf1_write8(gus, SNDRV_GF1_VB_VOLUME_CONTROL, 0x40);	/* ramp down */
				if (gus->gf1.enh_mode) {
					snd_gf1_delay(gus);
					snd_gf1_write8(gus, SNDRV_GF1_VB_VOLUME_CONTROL, 0x40);
				}
			}
			spin_unlock_irqrestore(&gus->reg_lock, flags);
		}
		msleep_interruptible(50);	/* let the ramps complete */
	}
	snd_gf1_clear_voices(gus, v_min, v_max);
}
/*
 * Mark @pvoice as allocated and tag it with its owner kind.  PCM
 * voices bump the global PCM allocation count; synth and MIDI voices
 * record their sequencer client/port.
 */
static void snd_gf1_alloc_voice_use(struct snd_gus_card * gus,
				    struct snd_gus_voice * pvoice,
				    int type, int client, int port)
{
	pvoice->use = 1;
	if (type == SNDRV_GF1_VOICE_TYPE_PCM) {
		gus->gf1.pcm_alloc_voices++;
		pvoice->pcm = 1;
	} else if (type == SNDRV_GF1_VOICE_TYPE_SYNTH) {
		pvoice->synth = 1;
		pvoice->client = client;
		pvoice->port = port;
	} else if (type == SNDRV_GF1_VOICE_TYPE_MIDI) {
		pvoice->midi = 1;
		pvoice->client = client;
		pvoice->port = port;
	}
}
/*
 * Allocate a hardware voice of the given @type.  PCM requests are
 * bounded by gf1.pcm_channels.  A completely free voice is preferred;
 * failing that, an ownerless MIDI voice (client == 0) is reclaimed
 * after being cleared.  Returns NULL when nothing is available.
 */
struct snd_gus_voice *snd_gf1_alloc_voice(struct snd_gus_card * gus, int type, int client, int port)
{
	struct snd_gus_voice *pvoice;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&gus->voice_alloc, flags);
	if (type == SNDRV_GF1_VOICE_TYPE_PCM) {
		if (gus->gf1.pcm_alloc_voices >= gus->gf1.pcm_channels) {
			spin_unlock_irqrestore(&gus->voice_alloc, flags);
			return NULL;
		}
	}
	for (idx = 0; idx < 32; idx++) {
		pvoice = &gus->gf1.voices[idx];
		if (!pvoice->use) {
			snd_gf1_alloc_voice_use(gus, pvoice, type, client, port);
			spin_unlock_irqrestore(&gus->voice_alloc, flags);
			return pvoice;
		}
	}
	for (idx = 0; idx < 32; idx++) {
		pvoice = &gus->gf1.voices[idx];
		if (pvoice->midi && !pvoice->client) {
			snd_gf1_clear_voices(gus, pvoice->number, pvoice->number);
			snd_gf1_alloc_voice_use(gus, pvoice, type, client, port);
			spin_unlock_irqrestore(&gus->voice_alloc, flags);
			return pvoice;
		}
	}
	spin_unlock_irqrestore(&gus->voice_alloc, flags);
	return NULL;
}
/*
 * Return a voice to the free pool: restore its default interrupt
 * handlers, silence the hardware channel, clear the bookkeeping under
 * the allocation lock, and finally invoke the owner's private_free
 * callback (outside the lock, since it may sleep or re-enter).
 */
void snd_gf1_free_voice(struct snd_gus_card * gus, struct snd_gus_voice *voice)
{
	unsigned long flags;
	void (*private_free)(struct snd_gus_voice *voice);
	void *private_data;

	if (voice == NULL || !voice->use)
		return;
	snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_VOICE | voice->number);
	snd_gf1_clear_voices(gus, voice->number, voice->number);
	spin_lock_irqsave(&gus->voice_alloc, flags);
	private_free = voice->private_free;
	private_data = voice->private_data;
	voice->private_free = NULL;
	voice->private_data = NULL;
	if (voice->pcm)
		gus->gf1.pcm_alloc_voices--;
	voice->use = voice->pcm = 0;
	voice->sample_ops = NULL;
	spin_unlock_irqrestore(&gus->voice_alloc, flags);
	if (private_free)
		private_free(voice);
}
/*
* call this function only by start of driver
*/
/*
 * One-time GF1 bring-up at driver start: hardware reset, default
 * handler installation, voice/register clearing, LFO mode selection,
 * IRQ/DAC enable and timer/memory subsystem initialization.  The
 * sequencing and delays follow the hardware's requirements — keep the
 * statement order intact.  Always returns 0.
 */
int snd_gf1_start(struct snd_gus_card * gus)
{
	unsigned long flags;
	unsigned int i;

	/* full chip reset, then release reset with IRQ/DAC still off */
	snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 0);	/* reset GF1 */
	udelay(160);
	snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1);	/* disable IRQ & DAC */
	udelay(160);

	snd_gf1_i_write8(gus, SNDRV_GF1_GB_JOYSTICK_DAC_LEVEL, gus->joystick_dac);

	snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_ALL);
	for (i = 0; i < 32; i++) {
		gus->gf1.voices[i].number = i;
		snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_VOICE | i);
	}

	snd_gf1_uart_cmd(gus, 0x03);	/* huh.. this cleanup took me some time... */

	if (gus->gf1.enh_mode) {	/* enhanced mode !!!! */
		snd_gf1_i_write8(gus, SNDRV_GF1_GB_GLOBAL_MODE, snd_gf1_i_look8(gus, SNDRV_GF1_GB_GLOBAL_MODE) | 0x01);
		snd_gf1_i_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x01);
	}
	snd_gf1_clear_regs(gus);
	snd_gf1_select_active_voices(gus);
	snd_gf1_delay(gus);
	gus->gf1.default_voice_address = gus->gf1.memory > 0 ? 0 : 512 - 8;
	/* initialize LFOs & clear LFOs memory */
	if (gus->gf1.enh_mode && gus->gf1.memory) {
		gus->gf1.hw_lfo = 1;
		gus->gf1.default_voice_address += 1024;
	} else {
		gus->gf1.sw_lfo = 1;
	}
#if 0
	snd_gf1_lfo_init(gus);
#endif
	if (gus->gf1.memory > 0)
		for (i = 0; i < 4; i++)
			snd_gf1_poke(gus, gus->gf1.default_voice_address + i, 0);
	snd_gf1_clear_regs(gus);
	snd_gf1_clear_voices(gus, 0, 31);
	snd_gf1_look_regs(gus);
	udelay(160);
	snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 7);	/* Reset Register = IRQ enable, DAC enable */
	udelay(160);
	snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 7);	/* Reset Register = IRQ enable, DAC enable */
	if (gus->gf1.enh_mode) {	/* enhanced mode !!!! */
		snd_gf1_i_write8(gus, SNDRV_GF1_GB_GLOBAL_MODE, snd_gf1_i_look8(gus, SNDRV_GF1_GB_GLOBAL_MODE) | 0x01);
		snd_gf1_i_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x01);
	}
	/* busy-wait until the voice IRQ register reports idle (0xc0) */
	while ((snd_gf1_i_read8(gus, SNDRV_GF1_GB_VOICES_IRQ) & 0xc0) != 0xc0);

	spin_lock_irqsave(&gus->reg_lock, flags);
	outb(gus->gf1.active_voice = 0, GUSP(gus, GF1PAGE));
	outb(gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG));
	spin_unlock_irqrestore(&gus->reg_lock, flags);

	snd_gf1_timers_init(gus);
	snd_gf1_look_regs(gus);
	snd_gf1_mem_init(gus);
	snd_gf1_mem_proc_init(gus);
#ifdef CONFIG_SND_DEBUG
	snd_gus_irq_profile_init(gus);
#endif

#if 0
	if (gus->pnp_flag) {
		if (gus->chip.playback_fifo_size > 0)
			snd_gf1_i_write16(gus, SNDRV_GF1_GW_FIFO_RECORD_BASE_ADDR, gus->chip.playback_fifo_block->ptr >> 8);
		if (gus->chip.record_fifo_size > 0)
			snd_gf1_i_write16(gus, SNDRV_GF1_GW_FIFO_PLAY_BASE_ADDR, gus->chip.record_fifo_block->ptr >> 8);
		snd_gf1_i_write16(gus, SNDRV_GF1_GW_FIFO_SIZE, gus->chip.interwave_fifo_reg);
	}
#endif

	return 0;
}
/*
* call this function only by shutdown of driver
*/
/*
 * Shut the GF1 down at driver teardown: stop timers and all 32
 * voices, disable IRQ/DAC, then release timer and memory resources.
 * Always returns 0.
 */
int snd_gf1_stop(struct snd_gus_card * gus)
{
	snd_gf1_i_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, 0);	/* stop all timers */
	snd_gf1_stop_voices(gus, 0, 31);		/* stop all voices */
	snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1);	/* disable IRQ & DAC */
	snd_gf1_timers_done(gus);
	snd_gf1_mem_done(gus);
#if 0
	snd_gf1_lfo_done(gus);
#endif
	return 0;
}
| gpl-2.0 |
netarchy/nexus-s | drivers/gpu/pvr/queue.c | 154 | 33050 | /**********************************************************************
*
* Copyright (C) Imagination Technologies Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful but, except
* as otherwise stated in writing, without any warranty; without even the
* implied warranty of merchantability or fitness for a particular purpose.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Imagination Technologies Ltd. <gpl-support@imgtec.com>
* Home Park Estate, Kings Langley, Herts, WD4 8LZ, UK
*
******************************************************************************/
#include "services_headers.h"
#include "lists.h"
#include "ttrace.h"
#if defined(SUPPORT_DC_CMDCOMPLETE_WHEN_NO_LONGER_DISPLAYED)
#define DC_NUM_COMMANDS_PER_TYPE 2
#else
#define DC_NUM_COMMANDS_PER_TYPE 1
#endif
/* Per-command-type bookkeeping for one device: the handler routine plus a
 * small circular set of command-complete slots (DC_NUM_COMMANDS_PER_TYPE of
 * them) and the maximum sync-object counts a command of this type may carry. */
typedef struct _DEVICE_COMMAND_DATA_
{
	PFN_CMD_PROC			pfnCmdProc;	/* handler invoked by PVRSRVProcessCommand */
	PCOMMAND_COMPLETE_DATA	apsCmdCompleteData[DC_NUM_COMMANDS_PER_TYPE];	/* completion slots */
	IMG_UINT32				ui32CCBOffset;	/* index of the next completion slot to use */
	IMG_UINT32				ui32MaxDstSyncCount;	/* upper bound on dst syncs per command */
	IMG_UINT32				ui32MaxSrcSyncCount;	/* upper bound on src syncs per command */
} DEVICE_COMMAND_DATA;
#if defined(__linux__) && defined(__KERNEL__)
#include "proc.h"
/* seq_file "show" callback for the /proc command-queue debug entry: prints a
 * header for the start token, otherwise dumps every command still pending in
 * the given queue together with its source-sync state. */
void ProcSeqShowQueue(struct seq_file *sfile,void* el)
{
	PVRSRV_QUEUE_INFO *psQueue = (PVRSRV_QUEUE_INFO*)el;
	IMG_INT cmds = 0;
	IMG_SIZE_T ui32ReadOffset;
	IMG_SIZE_T ui32WriteOffset;
	PVRSRV_COMMAND *psCmd;

	/* The start token selects the column header instead of a queue element. */
	if(el == PVR_PROC_SEQ_START_TOKEN)
	{
		seq_printf( sfile,
					"Command Queues\n"
					"Queue CmdPtr Pid Command Size DevInd DSC SSC #Data ...\n");
		return;
	}

	ui32ReadOffset = psQueue->ui32ReadOffset;
	ui32WriteOffset = psQueue->ui32WriteOffset;
	/* Walk the ring buffer from the read offset up to the write offset. */
	while (ui32ReadOffset != ui32WriteOffset)
	{
		psCmd= (PVRSRV_COMMAND *)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + ui32ReadOffset);
		seq_printf(sfile, "%x %x %5u %6u %3u %5u %2u %2u %3u \n",
				(IMG_UINTPTR_T)psQueue,
				(IMG_UINTPTR_T)psCmd,
				psCmd->ui32ProcessID,
				psCmd->CommandType,
				psCmd->uCmdSize,
				psCmd->ui32DevIndex,
				psCmd->ui32DstSyncCount,
				psCmd->ui32SrcSyncCount,
				psCmd->uDataSize);
		{
			IMG_UINT32 i;
			/* Per-command: dump pending/complete counters for each src sync. */
			for (i = 0; i < psCmd->ui32SrcSyncCount; i++)
			{
				PVRSRV_SYNC_DATA *psSyncData = psCmd->psSrcSync[i].psKernelSyncInfoKM->psSyncData;
				seq_printf(sfile, " Sync %u: ROP/ROC: 0x%x/0x%x WOP/WOC: 0x%x/0x%x ROC-VA: 0x%x WOC-VA: 0x%x\n",
						i,
						psCmd->psSrcSync[i].ui32ReadOps2Pending,
						psSyncData->ui32ReadOps2Complete,
						psCmd->psSrcSync[i].ui32WriteOpsPending,
						psSyncData->ui32WriteOpsComplete,
						psCmd->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
						psCmd->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr);
			}
		}
		/* Queue size is a power of two, so masking with (size - 1) wraps. */
		ui32ReadOffset += psCmd->uCmdSize;
		ui32ReadOffset &= psQueue->ui32QueueSize - 1;
		cmds++;
	}
	if (cmds == 0)
	{
		seq_printf(sfile, "%x <empty>\n", (IMG_UINTPTR_T)psQueue);
	}
}
/* seq_file offset-to-element callback: offset 0 maps to the header token,
 * offset N (N >= 1) maps to the (N-1)th queue on the global list, or NULL
 * when the list is shorter than that. */
void* ProcSeqOff2ElementQueue(struct seq_file * sfile, loff_t off)
{
	SYS_DATA *psSysData;
	PVRSRV_QUEUE_INFO *psEntry = IMG_NULL;

	PVR_UNREFERENCED_PARAMETER(sfile);

	/* Offset zero means "print the header", not a queue element. */
	if (!off)
	{
		return PVR_PROC_SEQ_START_TOKEN;
	}

	psSysData = SysAcquireDataNoCheck();
	if (psSysData != IMG_NULL)
	{
		psEntry = psSysData->psQueueList;
		/* Follow (off - 1) links; stops early with NULL when exhausted. */
		while (--off > 0 && psEntry != IMG_NULL)
		{
			psEntry = psEntry->psNextKM;
		}
	}
	return psEntry;
}
#endif
#define GET_SPACE_IN_CMDQ(psQueue) \
((((psQueue)->ui32ReadOffset - (psQueue)->ui32WriteOffset) \
+ ((psQueue)->ui32QueueSize - 1)) & ((psQueue)->ui32QueueSize - 1))
#define UPDATE_QUEUE_WOFF(psQueue, ui32Size) \
(psQueue)->ui32WriteOffset = ((psQueue)->ui32WriteOffset + (ui32Size)) \
& ((psQueue)->ui32QueueSize - 1);
#define SYNCOPS_STALE(ui32OpsComplete, ui32OpsPending) \
((ui32OpsComplete) >= (ui32OpsPending))
#ifdef INLINE_IS_PRAGMA
#pragma inline(PVRSRVGetWriteOpsPending)
#endif
/* Return the current write-ops-pending value for a sync object.  For a
 * write operation (bIsReadOp == IMG_FALSE) a slot is also reserved by
 * incrementing the pending counter; the caller always receives the
 * pre-increment value. */
static INLINE
IMG_UINT32 PVRSRVGetWriteOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
{
	PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
	IMG_UINT32 ui32Snapshot = psSyncData->ui32WriteOpsPending;

	if (!bIsReadOp)
	{
		/* Writers reserve a pending slot; readers only observe. */
		psSyncData->ui32WriteOpsPending = ui32Snapshot + 1;
	}
	return ui32Snapshot;
}
#ifdef INLINE_IS_PRAGMA
#pragma inline(PVRSRVGetReadOpsPending)
#endif
/* Return the current read-ops-pending value for a sync object.  For a read
 * operation a slot is also reserved by incrementing the pending counter;
 * the caller always receives the pre-increment value. */
static INLINE
IMG_UINT32 PVRSRVGetReadOpsPending(PVRSRV_KERNEL_SYNC_INFO *psSyncInfo, IMG_BOOL bIsReadOp)
{
	PVRSRV_SYNC_DATA *psSyncData = psSyncInfo->psSyncData;
	IMG_UINT32 ui32Snapshot = psSyncData->ui32ReadOps2Pending;

	if (bIsReadOp)
	{
		/* Readers reserve a pending slot; writers only observe. */
		psSyncData->ui32ReadOps2Pending = ui32Snapshot + 1;
	}
	return ui32Snapshot;
}
/* Log the read/write op pending/complete counters of the i-th sync object
 * attached to a command-complete slot (src list when bIsSrc, else dst list).
 * A slot not currently in use is reported as such. */
static IMG_VOID QueueDumpCmdComplete(COMMAND_COMPLETE_DATA *psCmdCompleteData,
									 IMG_UINT32 i,
									 IMG_BOOL bIsSrc)
{
	PVRSRV_SYNC_OBJECT *psSyncObject;

	psSyncObject = bIsSrc ? psCmdCompleteData->psSrcSync : psCmdCompleteData->psDstSync;

	if (psCmdCompleteData->bInUse)
	{
		PVR_LOG(("\t%s %u: ROC DevVAddr:0x%X ROP:0x%x ROC:0x%x, WOC DevVAddr:0x%X WOP:0x%x WOC:0x%x",
				bIsSrc ? "SRC" : "DEST", i,
				psSyncObject[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
				psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Pending,
				psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Complete,
				psSyncObject[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
				psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsPending,
				psSyncObject[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete))
	}
	else
	{
		PVR_LOG(("\t%s %u: (Not in use)", bIsSrc ? "SRC" : "DEST", i))
	}
}
/* Per-device callback for QueueDumpDebugInfo: for display-class devices,
 * dump the src/dst sync state of every flip-command completion slot. */
static IMG_VOID QueueDumpDebugInfo_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	if (psDeviceNode->sDevId.eDeviceClass == PVRSRV_DEVICE_CLASS_DISPLAY)
	{
		IMG_UINT32 ui32CmdCounter, ui32SyncCounter;
		SYS_DATA *psSysData;
		DEVICE_COMMAND_DATA *psDeviceCommandData;
		PCOMMAND_COMPLETE_DATA psCmdCompleteData;

		SysAcquireData(&psSysData);

		psDeviceCommandData = psSysData->apsDeviceCommandData[psDeviceNode->sDevId.ui32DeviceIndex];

		if (psDeviceCommandData != IMG_NULL)
		{
			/* One completion slot per entry in the per-type circular set. */
			for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++)
			{
				psCmdCompleteData = psDeviceCommandData[DC_FLIP_COMMAND].apsCmdCompleteData[ui32CmdCounter];

				PVR_LOG(("Flip Command Complete Data %u for display device %u:",
						ui32CmdCounter, psDeviceNode->sDevId.ui32DeviceIndex))

				for (ui32SyncCounter = 0;
					 ui32SyncCounter < psCmdCompleteData->ui32SrcSyncCount;
					 ui32SyncCounter++)
				{
					QueueDumpCmdComplete(psCmdCompleteData, ui32SyncCounter, IMG_TRUE);
				}

				for (ui32SyncCounter = 0;
					 ui32SyncCounter < psCmdCompleteData->ui32DstSyncCount;
					 ui32SyncCounter++)
				{
					QueueDumpCmdComplete(psCmdCompleteData, ui32SyncCounter, IMG_FALSE);
				}
			}
		}
		else
		{
			PVR_LOG(("There is no Command Complete Data for display device %u", psDeviceNode->sDevId.ui32DeviceIndex))
		}
	}
}
/* Dump flip-command completion state for every registered device node
 * (display devices only; see QueueDumpDebugInfo_ForEachCb). */
IMG_VOID QueueDumpDebugInfo(IMG_VOID)
{
	SYS_DATA *psSysData;
	SysAcquireData(&psSysData);
	List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList, &QueueDumpDebugInfo_ForEachCb);
}
/* Round ui32Value up to the nearest power of two.  Zero maps to zero; a
 * value that is already a power of two is returned unchanged. */
static IMG_SIZE_T NearestPower2(IMG_SIZE_T ui32Value)
{
	IMG_SIZE_T ui32Rounded = 1;
	IMG_SIZE_T ui32Bits;

	if (!ui32Value)
	{
		return 0;
	}

	/* Double the result once for every significant bit of (value - 1):
	 * this yields value itself for exact powers of two, and the next
	 * power of two otherwise. */
	for (ui32Bits = ui32Value - 1; ui32Bits != 0; ui32Bits >>= 1)
	{
		ui32Rounded <<= 1;
	}
	return ui32Rounded;
}
/* Create a command queue.  The ring-buffer size is rounded up to a power of
 * two and allocated with PVRSRV_MAX_CMD_SIZE of slack so a maximal command
 * never needs to wrap; the new queue is linked onto the global queue list
 * under the sQProcessResource lock.
 *
 * Fixes over the previous version:
 *  - psQueueInfo is initialised to IMG_NULL so the ErrorExit path does not
 *    read an indeterminate pointer if the first OSAllocMem fails;
 *  - the ErrorExit path frees the queue buffer with the size it was actually
 *    allocated with (ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE), not the
 *    smaller ui32QueueSize. */
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVCreateCommandQueueKM(IMG_SIZE_T ui32QueueSize,
													 PVRSRV_QUEUE_INFO **ppsQueueInfo)
{
	PVRSRV_QUEUE_INFO	*psQueueInfo = IMG_NULL;
	IMG_SIZE_T			ui32Power2QueueSize = NearestPower2(ui32QueueSize);
	SYS_DATA			*psSysData;
	PVRSRV_ERROR		eError;
	IMG_HANDLE			hMemBlock;

	SysAcquireData(&psSysData);

	eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
						sizeof(PVRSRV_QUEUE_INFO),
						(IMG_VOID **)&psQueueInfo, &hMemBlock,
						"Queue Info");
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue struct"));
		goto ErrorExit;
	}
	OSMemSet(psQueueInfo, 0, sizeof(PVRSRV_QUEUE_INFO));

	psQueueInfo->hMemBlock[0] = hMemBlock;
	psQueueInfo->ui32ProcessID = OSGetCurrentProcessIDKM();

	/* Slack of PVRSRV_MAX_CMD_SIZE lets a command be written contiguously
	 * even when the write offset is near the end of the ring. */
	eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
						ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
						&psQueueInfo->pvLinQueueKM, &hMemBlock,
						"Command Queue");
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVCreateCommandQueueKM: Failed to alloc queue buffer"));
		goto ErrorExit;
	}

	psQueueInfo->hMemBlock[1] = hMemBlock;
	psQueueInfo->pvLinQueueUM = psQueueInfo->pvLinQueueKM;

	PVR_ASSERT(psQueueInfo->ui32ReadOffset == 0);
	PVR_ASSERT(psQueueInfo->ui32WriteOffset == 0);

	psQueueInfo->ui32QueueSize = ui32Power2QueueSize;

	/* First queue created: create the global queue-list resource lock. */
	if (psSysData->psQueueList == IMG_NULL)
	{
		eError = OSCreateResource(&psSysData->sQProcessResource);
		if (eError != PVRSRV_OK)
		{
			goto ErrorExit;
		}
	}

	eError = OSLockResource(&psSysData->sQProcessResource,
							KERNEL_ID);
	if (eError != PVRSRV_OK)
	{
		goto ErrorExit;
	}

	/* Push the new queue on the head of the global list. */
	psQueueInfo->psNextKM = psSysData->psQueueList;
	psSysData->psQueueList = psQueueInfo;

	eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
	if (eError != PVRSRV_OK)
	{
		goto ErrorExit;
	}

	*ppsQueueInfo = psQueueInfo;

	return PVRSRV_OK;

ErrorExit:

	if(psQueueInfo)
	{
		if(psQueueInfo->pvLinQueueKM)
		{
			/* Free with the size used at allocation time. */
			OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
						ui32Power2QueueSize + PVRSRV_MAX_CMD_SIZE,
						psQueueInfo->pvLinQueueKM,
						psQueueInfo->hMemBlock[1]);
			psQueueInfo->pvLinQueueKM = IMG_NULL;
		}

		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
					sizeof(PVRSRV_QUEUE_INFO),
					psQueueInfo,
					psQueueInfo->hMemBlock[0]);
	}

	return eError;
}
/* Destroy a command queue: wait (bounded by MAX_HW_TIME_US) for it to drain,
 * unlink it from the global list under the sQProcessResource lock, free its
 * buffer and header, and destroy the lock when the last queue goes away.
 *
 * Fix over the previous version: the non-head unlink branch now frees the
 * queue buffer with the size it was allocated with
 * (NearestPower2(ui32QueueSize) + PVRSRV_MAX_CMD_SIZE), matching both the
 * head branch and PVRSRVCreateCommandQueueKM, instead of the bare
 * ui32QueueSize. */
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVDestroyCommandQueueKM(PVRSRV_QUEUE_INFO *psQueueInfo)
{
	PVRSRV_QUEUE_INFO	*psQueue;
	SYS_DATA			*psSysData;
	PVRSRV_ERROR		eError = PVRSRV_OK;
	IMG_BOOL			bTimeout = IMG_TRUE;

	SysAcquireData(&psSysData);

	psQueue = psSysData->psQueueList;

	/* Give in-flight commands a chance to drain before tearing down. */
	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
		{
			bTimeout = IMG_FALSE;
			break;
		}
		OSSleepms(1);
	} END_LOOP_UNTIL_TIMEOUT();

	if (bTimeout)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVDestroyCommandQueueKM : Failed to empty queue"));
		eError = PVRSRV_ERROR_CANNOT_FLUSH_QUEUE;
		goto ErrorExit;
	}

	eError = OSLockResource(&psSysData->sQProcessResource,
							KERNEL_ID);
	if (eError != PVRSRV_OK)
	{
		goto ErrorExit;
	}

	if(psQueue == psQueueInfo)
	{
		/* Queue is the list head: unlink and free. */
		psSysData->psQueueList = psQueueInfo->psNextKM;

		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
					NearestPower2(psQueueInfo->ui32QueueSize) + PVRSRV_MAX_CMD_SIZE,
					psQueueInfo->pvLinQueueKM,
					psQueueInfo->hMemBlock[1]);
		psQueueInfo->pvLinQueueKM = IMG_NULL;
		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
					sizeof(PVRSRV_QUEUE_INFO),
					psQueueInfo,
					psQueueInfo->hMemBlock[0]);
		psQueueInfo = IMG_NULL;
	}
	else
	{
		/* Search the list for the queue's predecessor. */
		while(psQueue)
		{
			if(psQueue->psNextKM == psQueueInfo)
			{
				psQueue->psNextKM = psQueueInfo->psNextKM;

				/* Same allocation size as in PVRSRVCreateCommandQueueKM;
				 * ui32QueueSize is already a power of two, so the
				 * NearestPower2 call is idempotent here. */
				OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
							NearestPower2(psQueueInfo->ui32QueueSize) + PVRSRV_MAX_CMD_SIZE,
							psQueueInfo->pvLinQueueKM,
							psQueueInfo->hMemBlock[1]);
				psQueueInfo->pvLinQueueKM = IMG_NULL;
				OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
							sizeof(PVRSRV_QUEUE_INFO),
							psQueueInfo,
							psQueueInfo->hMemBlock[0]);
				psQueueInfo = IMG_NULL;
				break;
			}
			psQueue = psQueue->psNextKM;
		}

		if(!psQueue)
		{
			/* Queue was not on the list: release the lock and fail. */
			eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
			if (eError != PVRSRV_OK)
			{
				goto ErrorExit;
			}
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto ErrorExit;
		}
	}

	eError = OSUnlockResource(&psSysData->sQProcessResource, KERNEL_ID);
	if (eError != PVRSRV_OK)
	{
		goto ErrorExit;
	}

	/* Last queue gone: the list lock itself is no longer needed. */
	if (psSysData->psQueueList == IMG_NULL)
	{
		eError = OSDestroyResource(&psSysData->sQProcessResource);
		if (eError != PVRSRV_OK)
		{
			goto ErrorExit;
		}
	}

ErrorExit:
	return eError;
}
/* Reserve ui32ParamSize bytes (rounded up to 4-byte alignment) of space in a
 * command queue, spinning for up to MAX_HW_TIME_US until the ring has room.
 * On success *ppvSpace points at the user-mapped write position; on timeout
 * *ppvSpace is NULL and PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE is returned.
 *
 * Fix over the previous version: the alignment mask was the 32-bit literal
 * 0xFFFFFFFC, which truncates IMG_SIZE_T on 64-bit builds; use the
 * width-correct ~((IMG_SIZE_T)3), matching the ~3UL rounding in
 * PVRSRVInsertCommandKM. */
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVGetQueueSpaceKM(PVRSRV_QUEUE_INFO *psQueue,
												IMG_SIZE_T ui32ParamSize,
												IMG_VOID **ppvSpace)
{
	IMG_BOOL bTimeout = IMG_TRUE;

	/* Round the request up to a 4-byte boundary without truncating the
	 * size type. */
	ui32ParamSize = (ui32ParamSize + 3) & ~((IMG_SIZE_T)3);

	if (ui32ParamSize > PVRSRV_MAX_CMD_SIZE)
	{
		PVR_DPF((PVR_DBG_WARNING,"PVRSRVGetQueueSpace: max command size is %d bytes", PVRSRV_MAX_CMD_SIZE));
		return PVRSRV_ERROR_CMD_TOO_BIG;
	}

	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		/* Strictly greater-than: one byte is kept free so a full ring is
		 * distinguishable from an empty one. */
		if (GET_SPACE_IN_CMDQ(psQueue) > ui32ParamSize)
		{
			bTimeout = IMG_FALSE;
			break;
		}
		OSSleepms(1);
	} END_LOOP_UNTIL_TIMEOUT();

	if (bTimeout == IMG_TRUE)
	{
		*ppvSpace = IMG_NULL;
		return PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE;
	}
	else
	{
		*ppvSpace = (IMG_VOID *)((IMG_UINTPTR_T)psQueue->pvLinQueueUM + psQueue->ui32WriteOffset);
	}

	return PVRSRV_OK;
}
/* Build a command (header + dst syncs + src syncs + payload area) in the
 * queue's ring buffer and take pending-op snapshots on every sync object.
 * The caller fills the payload via *ppsCommand and then calls
 * PVRSRVSubmitCommandKM to publish it.
 *
 * Fix over the previous version: ui32DevIndex is validated against
 * SYS_DEVICE_COUNT before it is used to index apsDeviceCommandData[] —
 * PVRSRVProcessCommand, PVRSRVRegisterCmdProcListKM and
 * PVRSRVRemoveCmdProcListKM all perform this check, but it was missing here.
 * NOTE(review): CommandType is still not range-checked against the
 * registered command count, which is not stored — confirm callers only pass
 * registered types. */
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVInsertCommandKM(PVRSRV_QUEUE_INFO *psQueue,
												PVRSRV_COMMAND **ppsCommand,
												IMG_UINT32 ui32DevIndex,
												IMG_UINT16 CommandType,
												IMG_UINT32 ui32DstSyncCount,
												PVRSRV_KERNEL_SYNC_INFO *apsDstSync[],
												IMG_UINT32 ui32SrcSyncCount,
												PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[],
												IMG_SIZE_T ui32DataByteSize,
												PFN_QUEUE_COMMAND_COMPLETE pfnCommandComplete,
												IMG_HANDLE hCallbackData)
{
	PVRSRV_ERROR 	eError;
	PVRSRV_COMMAND	*psCommand;
	IMG_SIZE_T		ui32CommandSize;
	IMG_UINT32		i;
	SYS_DATA		*psSysData;
	DEVICE_COMMAND_DATA *psDeviceCommandData;

	SysAcquireData(&psSysData);

	/* Bounds-check before indexing the per-device command table. */
	if (ui32DevIndex >= SYS_DEVICE_COUNT)
	{
		PVR_DPF((PVR_DBG_ERROR,
					"PVRSRVInsertCommandKM: invalid DeviceType 0x%x",
					ui32DevIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psDeviceCommandData = psSysData->apsDeviceCommandData[ui32DevIndex];
	if ((psDeviceCommandData[CommandType].ui32MaxDstSyncCount < ui32DstSyncCount) ||
		(psDeviceCommandData[CommandType].ui32MaxSrcSyncCount < ui32SrcSyncCount))
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVInsertCommandKM: Too many syncs"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* 4-byte-align the payload; total size covers header + sync arrays. */
	ui32DataByteSize = (ui32DataByteSize + 3UL) & ~3UL;

	ui32CommandSize = sizeof(PVRSRV_COMMAND)
					+ ((ui32DstSyncCount + ui32SrcSyncCount) * sizeof(PVRSRV_SYNC_OBJECT))
					+ ui32DataByteSize;

	/* Blocks (bounded) until the ring has room for the whole command. */
	eError = PVRSRVGetQueueSpaceKM (psQueue, ui32CommandSize, (IMG_VOID**)&psCommand);
	if(eError != PVRSRV_OK)
	{
		return eError;
	}

	psCommand->ui32ProcessID	= OSGetCurrentProcessIDKM();

	psCommand->uCmdSize		= ui32CommandSize;
	psCommand->ui32DevIndex 	= ui32DevIndex;
	psCommand->CommandType 		= CommandType;
	psCommand->ui32DstSyncCount	= ui32DstSyncCount;
	psCommand->ui32SrcSyncCount	= ui32SrcSyncCount;
	/* Sync arrays and payload are laid out immediately after the header. */
	psCommand->psDstSync		= (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand) + sizeof(PVRSRV_COMMAND));

	psCommand->psSrcSync		= (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psDstSync)
								+ (ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));

	psCommand->pvData			= (PVRSRV_SYNC_OBJECT*)(((IMG_UINTPTR_T)psCommand->psSrcSync)
								+ (ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT)));

	psCommand->uDataSize		= ui32DataByteSize;

	psCommand->pfnCommandComplete = pfnCommandComplete;
	psCommand->hCallbackData = hCallbackData;

	PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_START, QUEUE_TOKEN_INSERTKM);
	PVR_TTRACE_UI32(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_NONE,
			QUEUE_TOKEN_COMMAND_TYPE, CommandType);

	/* Snapshot pending op counts: dst syncs reserve a write slot. */
	for (i=0; i<ui32DstSyncCount; i++)
	{
		PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_DST_SYNC,
					apsDstSync[i], PVRSRV_SYNCOP_SAMPLE);

		psCommand->psDstSync[i].psKernelSyncInfoKM = apsDstSync[i];
		psCommand->psDstSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsDstSync[i], IMG_FALSE);
		psCommand->psDstSync[i].ui32ReadOps2Pending = PVRSRVGetReadOpsPending(apsDstSync[i], IMG_FALSE);

		PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x",
				i, psCommand->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
				psCommand->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
				psCommand->psDstSync[i].ui32ReadOps2Pending,
				psCommand->psDstSync[i].ui32WriteOpsPending));
	}

	/* Src syncs reserve a read slot. */
	for (i=0; i<ui32SrcSyncCount; i++)
	{
		PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_DST_SYNC,
					apsSrcSync[i], PVRSRV_SYNCOP_SAMPLE);

		psCommand->psSrcSync[i].psKernelSyncInfoKM = apsSrcSync[i];
		psCommand->psSrcSync[i].ui32WriteOpsPending = PVRSRVGetWriteOpsPending(apsSrcSync[i], IMG_TRUE);
		psCommand->psSrcSync[i].ui32ReadOps2Pending = PVRSRVGetReadOpsPending(apsSrcSync[i], IMG_TRUE);

		PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVInsertCommandKM: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x",
				i, psCommand->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
				psCommand->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
				psCommand->psSrcSync[i].ui32ReadOps2Pending,
				psCommand->psSrcSync[i].ui32WriteOpsPending));
	}
	PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_END, QUEUE_TOKEN_INSERTKM);

	*ppsCommand = psCommand;

	return PVRSRV_OK;
}
/* Publish a command built by PVRSRVInsertCommandKM: re-point the dst/src
 * sync arrays and payload at their kernel-mapped locations inside the ring,
 * then advance the queue write offset.  Always returns PVRSRV_OK. */
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVSubmitCommandKM(PVRSRV_QUEUE_INFO *psQueue,
												PVRSRV_COMMAND *psCommand)
{
	/* Running cursor: starts just past the command header in the KM view. */
	IMG_UINTPTR_T uiCursor = (IMG_UINTPTR_T)psQueue->pvLinQueueKM
							+ psQueue->ui32WriteOffset
							+ sizeof(PVRSRV_COMMAND);

	if (psCommand->ui32DstSyncCount > 0)
	{
		psCommand->psDstSync = (PVRSRV_SYNC_OBJECT*)uiCursor;
	}
	uiCursor += psCommand->ui32DstSyncCount * sizeof(PVRSRV_SYNC_OBJECT);

	if (psCommand->ui32SrcSyncCount > 0)
	{
		psCommand->psSrcSync = (PVRSRV_SYNC_OBJECT*)uiCursor;
	}
	uiCursor += psCommand->ui32SrcSyncCount * sizeof(PVRSRV_SYNC_OBJECT);

	/* Payload always follows both sync arrays. */
	psCommand->pvData = (PVRSRV_SYNC_OBJECT*)uiCursor;

	/* Make the command visible to PVRSRVProcessQueues. */
	UPDATE_QUEUE_WOFF(psQueue, psCommand->uCmdSize);

	return PVRSRV_OK;
}
/* Return PVRSRV_OK if the given sync object is already referenced as a src
 * sync by an in-flight command-complete slot whose write snapshot matches
 * (or is stale relative to) the current completed count; otherwise return
 * PVRSRV_ERROR_FAILED_DEPENDENCIES. */
static
PVRSRV_ERROR CheckIfSyncIsQueued(PVRSRV_SYNC_OBJECT *psSync, COMMAND_COMPLETE_DATA *psCmdData)
{
	IMG_UINT32 k;

	if (psCmdData->bInUse)
	{
		for (k=0;k<psCmdData->ui32SrcSyncCount;k++)
		{
			if (psSync->psKernelSyncInfoKM == psCmdData->psSrcSync[k].psKernelSyncInfoKM)
			{
				PVRSRV_SYNC_DATA *psSyncData = psSync->psKernelSyncInfoKM->psSyncData;
				IMG_UINT32 ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;

				/* Exact match: the queued command will satisfy this sync. */
				if (ui32WriteOpsComplete == psSync->ui32WriteOpsPending)
				{
					return PVRSRV_OK;
				}
				else
				{
					/* Completed count already past pending: treat as
					 * satisfied, but warn about the stale snapshot. */
					if (SYNCOPS_STALE(ui32WriteOpsComplete, psSync->ui32WriteOpsPending))
					{
						PVR_DPF((PVR_DBG_WARNING,
								"CheckIfSyncIsQueued: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
								(IMG_UINTPTR_T)psSyncData, ui32WriteOpsComplete, psSync->ui32WriteOpsPending));
						return PVRSRV_OK;
					}
				}
			}
		}
	}
	return PVRSRV_ERROR_FAILED_DEPENDENCIES;
}
/* Attempt to dispatch one queued command: verify all dst and src sync
 * dependencies are met (or stale, when flushing), claim a free
 * command-complete slot, copy the sync snapshots into it, and invoke the
 * registered per-command-type handler.  Returns
 * PVRSRV_ERROR_FAILED_DEPENDENCIES when the command cannot run yet.
 *
 * Fixes over the previous version:
 *  - the SYS_DEVICE_COUNT bounds check on ui32DevIndex is performed before
 *    apsDeviceCommandData[] is indexed (the old code indexed it inside the
 *    src-sync loop first, an out-of-bounds read for a bad index);
 *  - the inner PVRSRV_ERROR that shadowed the function-scope eError has
 *    been renamed. */
static
PVRSRV_ERROR PVRSRVProcessCommand(SYS_DATA *psSysData,
								  PVRSRV_COMMAND *psCommand,
								  IMG_BOOL bFlush)
{
	PVRSRV_SYNC_OBJECT		*psWalkerObj;
	PVRSRV_SYNC_OBJECT		*psEndObj;
	IMG_UINT32				i;
	COMMAND_COMPLETE_DATA	*psCmdCompleteData;
	PVRSRV_ERROR			eError = PVRSRV_OK;
	IMG_UINT32				ui32WriteOpsComplete;
	IMG_UINT32				ui32ReadOpsComplete;
	DEVICE_COMMAND_DATA		*psDeviceCommandData;
	IMG_UINT32				ui32CCBOffset;

	/* Validate the device index up front, before any table access. */
	if (psCommand->ui32DevIndex >= SYS_DEVICE_COUNT)
	{
		PVR_DPF((PVR_DBG_ERROR,
					"PVRSRVProcessCommand: invalid DeviceType 0x%x",
					psCommand->ui32DevIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}
	psDeviceCommandData = psSysData->apsDeviceCommandData[psCommand->ui32DevIndex];

	/* Destination syncs must be exactly ready (or stale while flushing). */
	psWalkerObj = psCommand->psDstSync;
	psEndObj = psWalkerObj + psCommand->ui32DstSyncCount;
	while (psWalkerObj < psEndObj)
	{
		PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;

		ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;
		ui32ReadOpsComplete = psSyncData->ui32ReadOps2Complete;

		if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
		||	(ui32ReadOpsComplete != psWalkerObj->ui32ReadOps2Pending))
		{
			if (!bFlush ||
				!SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
				!SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending))
			{
				return PVRSRV_ERROR_FAILED_DEPENDENCIES;
			}
		}
		psWalkerObj++;
	}

	/* Source syncs: an unmet dependency is still acceptable if the sync is
	 * already queued on an in-flight command-complete slot. */
	psWalkerObj = psCommand->psSrcSync;
	psEndObj = psWalkerObj + psCommand->ui32SrcSyncCount;
	while (psWalkerObj < psEndObj)
	{
		PVRSRV_SYNC_DATA *psSyncData = psWalkerObj->psKernelSyncInfoKM->psSyncData;

		ui32ReadOpsComplete = psSyncData->ui32ReadOps2Complete;
		ui32WriteOpsComplete = psSyncData->ui32WriteOpsComplete;

		if ((ui32WriteOpsComplete != psWalkerObj->ui32WriteOpsPending)
		|| (ui32ReadOpsComplete != psWalkerObj->ui32ReadOps2Pending))
		{
			if (!bFlush &&
				SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) &&
				SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending))
			{
				PVR_DPF((PVR_DBG_WARNING,
						"PVRSRVProcessCommand: Stale syncops psSyncData:0x%x ui32WriteOpsComplete:0x%x ui32WriteOpsPending:0x%x",
						(IMG_UINTPTR_T)psSyncData, ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending));
			}

			if (!bFlush ||
				!SYNCOPS_STALE(ui32WriteOpsComplete, psWalkerObj->ui32WriteOpsPending) ||
				!SYNCOPS_STALE(ui32ReadOpsComplete, psWalkerObj->ui32ReadOps2Pending))
			{
				IMG_UINT32 j;
				PVRSRV_ERROR eQueuedError;
				IMG_BOOL bFound = IMG_FALSE;

				for (j=0;j<DC_NUM_COMMANDS_PER_TYPE;j++)
				{
					eQueuedError = CheckIfSyncIsQueued(psWalkerObj, psDeviceCommandData[psCommand->CommandType].apsCmdCompleteData[j]);
					if (eQueuedError == PVRSRV_OK)
					{
						bFound = IMG_TRUE;
					}
				}
				if (!bFound)
				{
					return PVRSRV_ERROR_FAILED_DEPENDENCIES;
				}
			}
		}
		psWalkerObj++;
	}

	/* Claim the next completion slot in the per-type circular set. */
	ui32CCBOffset = psDeviceCommandData[psCommand->CommandType].ui32CCBOffset;
	psCmdCompleteData = psDeviceCommandData[psCommand->CommandType].apsCmdCompleteData[ui32CCBOffset];
	if (psCmdCompleteData->bInUse)
	{
		/* Slot still busy: command stays queued. */
		return PVRSRV_ERROR_FAILED_DEPENDENCIES;
	}

	psCmdCompleteData->bInUse = IMG_TRUE;

	/* Copy the sync snapshots so PVRSRVCommandCompleteKM can update them. */
	psCmdCompleteData->ui32DstSyncCount = psCommand->ui32DstSyncCount;
	for (i=0; i<psCommand->ui32DstSyncCount; i++)
	{
		psCmdCompleteData->psDstSync[i] = psCommand->psDstSync[i];

		PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x (CCB:%u)",
				i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
				psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
				psCmdCompleteData->psDstSync[i].ui32ReadOps2Pending,
				psCmdCompleteData->psDstSync[i].ui32WriteOpsPending,
				ui32CCBOffset));
	}

	psCmdCompleteData->pfnCommandComplete = psCommand->pfnCommandComplete;
	psCmdCompleteData->hCallbackData = psCommand->hCallbackData;

	psCmdCompleteData->ui32SrcSyncCount = psCommand->ui32SrcSyncCount;
	for (i=0; i<psCommand->ui32SrcSyncCount; i++)
	{
		psCmdCompleteData->psSrcSync[i] = psCommand->psSrcSync[i];

		PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVProcessCommand: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x (CCB:%u)",
				i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
				psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
				psCmdCompleteData->psSrcSync[i].ui32ReadOps2Pending,
				psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending,
				ui32CCBOffset));
	}

	/* Hand the command to the registered handler; on refusal release the
	 * slot and report it unprocessed. */
	if (psDeviceCommandData[psCommand->CommandType].pfnCmdProc((IMG_HANDLE)psCmdCompleteData,
															   (IMG_UINT32)psCommand->uDataSize,
															   psCommand->pvData) == IMG_FALSE)
	{
		psCmdCompleteData->bInUse = IMG_FALSE;
		eError = PVRSRV_ERROR_CMD_NOT_PROCESSED;
	}

	/* Rotate to the next completion slot for this command type. */
	psDeviceCommandData[psCommand->CommandType].ui32CCBOffset = (ui32CCBOffset + 1) % DC_NUM_COMMANDS_PER_TYPE;

	return eError;
}
/* Per-device callback for PVRSRVProcessQueues: re-run the device's
 * command-complete handler when it has asked for reprocessing. */
static IMG_VOID PVRSRVProcessQueues_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode)
{
	if (!psDeviceNode->bReProcessDeviceCommandComplete)
	{
		return;
	}
	if (psDeviceNode->pfnDeviceCommandComplete == IMG_NULL)
	{
		return;
	}
	(*psDeviceNode->pfnDeviceCommandComplete)(psDeviceNode);
}
/* Drain every queue on the global list: under the sQProcessResource lock,
 * repeatedly dispatch commands via PVRSRVProcessCommand until a queue stalls
 * on a dependency, then run deferred per-device command-complete callbacks.
 * When bFlush is set the display-class state is toggled so stale sync
 * dependencies are allowed through. */
IMG_EXPORT
PVRSRV_ERROR PVRSRVProcessQueues(IMG_BOOL bFlush)
{
	PVRSRV_QUEUE_INFO 	*psQueue;
	SYS_DATA			*psSysData;
	PVRSRV_COMMAND 		*psCommand;

	SysAcquireData(&psSysData);

	/* Spin until the queue-list lock is acquired (may run from ISR
	 * context, hence the busy-wait rather than a sleeping lock). */
	while (OSLockResource(&psSysData->sQProcessResource, ISR_ID) != PVRSRV_OK)
	{
		OSWaitus(1);
	};

	psQueue = psSysData->psQueueList;

	if(!psQueue)
	{
		PVR_DPF((PVR_DBG_MESSAGE,"No Queues installed - cannot process commands"));
	}

	if (bFlush)
	{
		PVRSRVSetDCState(DC_STATE_FLUSH_COMMANDS);
	}

	while (psQueue)
	{
		/* Dispatch in order; stop at the first command that cannot run. */
		while (psQueue->ui32ReadOffset != psQueue->ui32WriteOffset)
		{
			psCommand = (PVRSRV_COMMAND*)((IMG_UINTPTR_T)psQueue->pvLinQueueKM + psQueue->ui32ReadOffset);

			if (PVRSRVProcessCommand(psSysData, psCommand, bFlush) == PVRSRV_OK)
			{
				UPDATE_QUEUE_ROFF(psQueue, psCommand->uCmdSize)
				continue;
			}
			break;
		}
		psQueue = psQueue->psNextKM;
	}

	if (bFlush)
	{
		PVRSRVSetDCState(DC_STATE_NO_FLUSH_COMMANDS);
	}

	/* Re-run any device command-complete handlers that requested it. */
	List_PVRSRV_DEVICE_NODE_ForEach(psSysData->psDeviceNodeList,
									&PVRSRVProcessQueues_ForEachCb);

	OSUnlockResource(&psSysData->sQProcessResource, ISR_ID);

	return PVRSRV_OK;
}
#if defined(SUPPORT_CUSTOM_SWAP_OPERATIONS)
/* Release a command-complete slot WITHOUT updating any sync counters (used
 * by custom swap operations), then kick device callbacks and, optionally,
 * the MISR so stalled queues can make progress. */
IMG_INTERNAL
IMG_VOID PVRSRVFreeCommandCompletePacketKM(IMG_HANDLE hCmdCookie,
										   IMG_BOOL bScheduleMISR)
{
	COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
	SYS_DATA *psSysData;

	SysAcquireData(&psSysData);

	/* Mark the slot free so PVRSRVProcessCommand can reuse it. */
	psCmdCompleteData->bInUse = IMG_FALSE;

	PVRSRVScheduleDeviceCallbacks();

	if(bScheduleMISR)
	{
		OSScheduleMISR(psSysData);
	}
}
#endif
/* Complete a command: bump write-ops-complete on every dst sync and
 * read-ops(2)-complete on every src sync recorded in the completion slot,
 * invoke the optional per-command completion callback, release the slot,
 * and kick device callbacks (and, optionally, the MISR). */
IMG_EXPORT
IMG_VOID PVRSRVCommandCompleteKM(IMG_HANDLE hCmdCookie,
								 IMG_BOOL bScheduleMISR)
{
	IMG_UINT32 i;
	COMMAND_COMPLETE_DATA *psCmdCompleteData = (COMMAND_COMPLETE_DATA *)hCmdCookie;
	SYS_DATA *psSysData;

	SysAcquireData(&psSysData);

	PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_COMP_START,
			QUEUE_TOKEN_COMMAND_COMPLETE);

	/* Destination syncs: the command's write has now completed. */
	for (i=0; i<psCmdCompleteData->ui32DstSyncCount; i++)
	{
		psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->psSyncData->ui32WriteOpsComplete++;

		PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_UPDATE_DST,
					  psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM,
					  PVRSRV_SYNCOP_COMPLETE);

		PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Dst %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x",
				i, psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
				psCmdCompleteData->psDstSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
				psCmdCompleteData->psDstSync[i].ui32ReadOps2Pending,
				psCmdCompleteData->psDstSync[i].ui32WriteOpsPending));
	}

	/* Source syncs: the command's read has now completed. */
	for (i=0; i<psCmdCompleteData->ui32SrcSyncCount; i++)
	{
		psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->psSyncData->ui32ReadOps2Complete++;

		PVR_TTRACE_SYNC_OBJECT(PVRSRV_TRACE_GROUP_QUEUE, QUEUE_TOKEN_UPDATE_SRC,
					  psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM,
					  PVRSRV_SYNCOP_COMPLETE);

		PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVCommandCompleteKM: Src %u RO-VA:0x%x WO-VA:0x%x ROP:0x%x WOP:0x%x",
				i, psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sReadOps2CompleteDevVAddr.uiAddr,
				psCmdCompleteData->psSrcSync[i].psKernelSyncInfoKM->sWriteOpsCompleteDevVAddr.uiAddr,
				psCmdCompleteData->psSrcSync[i].ui32ReadOps2Pending,
				psCmdCompleteData->psSrcSync[i].ui32WriteOpsPending));
	}

	PVR_TTRACE(PVRSRV_TRACE_GROUP_QUEUE, PVRSRV_TRACE_CLASS_CMD_COMP_END,
			QUEUE_TOKEN_COMMAND_COMPLETE);

	/* Optional per-command notification supplied at insert time. */
	if (psCmdCompleteData->pfnCommandComplete)
	{
		psCmdCompleteData->pfnCommandComplete(psCmdCompleteData->hCallbackData);
	}

	/* Free the slot, then let devices and the MISR make progress. */
	psCmdCompleteData->bInUse = IMG_FALSE;

	PVRSRVScheduleDeviceCallbacks();

	if(bScheduleMISR)
	{
		OSScheduleMISR(psSysData);
	}
}
/* Build the per-command-type dispatch table for a device: one
 * DEVICE_COMMAND_DATA entry per command type, each with
 * DC_NUM_COMMANDS_PER_TYPE command-complete slots sized for the declared
 * maximum dst ([0]) and src ([1]) sync counts.  On any allocation failure
 * all partial allocations are released via PVRSRVRemoveCmdProcListKM. */
IMG_EXPORT
PVRSRV_ERROR PVRSRVRegisterCmdProcListKM(IMG_UINT32 ui32DevIndex,
										 PFN_CMD_PROC *ppfnCmdProcList,
										 IMG_UINT32 ui32MaxSyncsPerCmd[][2],
										 IMG_UINT32 ui32CmdCount)
{
	SYS_DATA *psSysData;
	PVRSRV_ERROR eError;
	IMG_UINT32 ui32CmdCounter, ui32CmdTypeCounter;
	IMG_SIZE_T ui32AllocSize;
	DEVICE_COMMAND_DATA *psDeviceCommandData;
	COMMAND_COMPLETE_DATA *psCmdCompleteData;

	if(ui32DevIndex >= SYS_DEVICE_COUNT)
	{
		PVR_DPF((PVR_DBG_ERROR,
					"PVRSRVRegisterCmdProcListKM: invalid DeviceType 0x%x",
					ui32DevIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	SysAcquireData(&psSysData);

	/* One dispatch entry per command type. */
	ui32AllocSize = ui32CmdCount * sizeof(*psDeviceCommandData);
	eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
						 ui32AllocSize,
						 (IMG_VOID **)&psDeviceCommandData, IMG_NULL,
						 "Array of Pointers for Command Store");
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc CC data"));
		goto ErrorExit;
	}

	psSysData->apsDeviceCommandData[ui32DevIndex] = psDeviceCommandData;

	for (ui32CmdTypeCounter = 0; ui32CmdTypeCounter < ui32CmdCount; ui32CmdTypeCounter++)
	{
		psDeviceCommandData[ui32CmdTypeCounter].pfnCmdProc = ppfnCmdProcList[ui32CmdTypeCounter];
		psDeviceCommandData[ui32CmdTypeCounter].ui32CCBOffset = 0;
		/* [0] = max dst syncs, [1] = max src syncs for this command type. */
		psDeviceCommandData[ui32CmdTypeCounter].ui32MaxDstSyncCount = ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0];
		psDeviceCommandData[ui32CmdTypeCounter].ui32MaxSrcSyncCount = ui32MaxSyncsPerCmd[ui32CmdTypeCounter][1];
		for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++)
		{
			/* Slot header plus room for the worst-case sync arrays. */
			ui32AllocSize = sizeof(COMMAND_COMPLETE_DATA)
						  + ((ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0]
						  + ui32MaxSyncsPerCmd[ui32CmdTypeCounter][1])
						  * sizeof(PVRSRV_SYNC_OBJECT));

			eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
								ui32AllocSize,
								(IMG_VOID **)&psCmdCompleteData,
								IMG_NULL,
								"Command Complete Data");
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,"PVRSRVRegisterCmdProcListKM: Failed to alloc cmd %d", ui32CmdTypeCounter));
				goto ErrorExit;
			}

			psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter] = psCmdCompleteData;

			OSMemSet(psCmdCompleteData, 0x00, ui32AllocSize);

			/* Dst syncs sit immediately after the header, src syncs after
			 * the dst array. */
			psCmdCompleteData->psDstSync = (PVRSRV_SYNC_OBJECT*)
											(((IMG_UINTPTR_T)psCmdCompleteData)
											+ sizeof(COMMAND_COMPLETE_DATA));
			psCmdCompleteData->psSrcSync = (PVRSRV_SYNC_OBJECT*)
											(((IMG_UINTPTR_T)psCmdCompleteData->psDstSync)
											+ (sizeof(PVRSRV_SYNC_OBJECT) * ui32MaxSyncsPerCmd[ui32CmdTypeCounter][0]));

			psCmdCompleteData->ui32AllocSize = (IMG_UINT32)ui32AllocSize;
		}
	}

	return PVRSRV_OK;

ErrorExit:

	/* Tear down whatever was allocated before the failure. */
	if (PVRSRVRemoveCmdProcListKM(ui32DevIndex, ui32CmdCount) != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,
				"PVRSRVRegisterCmdProcListKM: Failed to clean up after error, device 0x%x",
				ui32DevIndex));
	}

	return eError;
}
/* Free a device's command dispatch table and all of its command-complete
 * slots.  NULL entries (from a partially-failed registration) are skipped,
 * so this is safe to call from PVRSRVRegisterCmdProcListKM's error path. */
IMG_EXPORT
PVRSRV_ERROR PVRSRVRemoveCmdProcListKM(IMG_UINT32 ui32DevIndex,
									   IMG_UINT32 ui32CmdCount)
{
	SYS_DATA *psSysData;
	IMG_UINT32 ui32CmdTypeCounter, ui32CmdCounter;
	DEVICE_COMMAND_DATA *psDeviceCommandData;
	COMMAND_COMPLETE_DATA *psCmdCompleteData;
	IMG_SIZE_T ui32AllocSize;

	if(ui32DevIndex >= SYS_DEVICE_COUNT)
	{
		PVR_DPF((PVR_DBG_ERROR,
					"PVRSRVRemoveCmdProcListKM: invalid DeviceType 0x%x",
					ui32DevIndex));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	SysAcquireData(&psSysData);

	psDeviceCommandData = psSysData->apsDeviceCommandData[ui32DevIndex];
	if(psDeviceCommandData != IMG_NULL)
	{
		for (ui32CmdTypeCounter = 0; ui32CmdTypeCounter < ui32CmdCount; ui32CmdTypeCounter++)
		{
			for (ui32CmdCounter = 0; ui32CmdCounter < DC_NUM_COMMANDS_PER_TYPE; ui32CmdCounter++)
			{
				psCmdCompleteData = psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter];

				/* May be NULL if registration failed part-way through. */
				if (psCmdCompleteData != IMG_NULL)
				{
					OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, psCmdCompleteData->ui32AllocSize,
							  psCmdCompleteData, IMG_NULL);
					psDeviceCommandData[ui32CmdTypeCounter].apsCmdCompleteData[ui32CmdCounter] = IMG_NULL;
				}
			}
		}

		ui32AllocSize = ui32CmdCount * sizeof(*psDeviceCommandData);
		OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP, ui32AllocSize, psDeviceCommandData, IMG_NULL);
		psSysData->apsDeviceCommandData[ui32DevIndex] = IMG_NULL;
	}

	return PVRSRV_OK;
}
| gpl-2.0 |
elmo2k3/linux | drivers/rtc/rtc-max6900.c | 154 | 6715 | /*
* rtc class driver for the Maxim MAX6900 chip
*
* Author: Dale Farnsworth <dale@farnsworth.org>
*
* based on previously existing rtc class drivers
*
* 2007 (c) MontaVista, Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#define DRV_VERSION "0.2"
/*
* register indices
*/
#define MAX6900_REG_SC 0 /* seconds 00-59 */
#define MAX6900_REG_MN 1 /* minutes 00-59 */
#define MAX6900_REG_HR 2 /* hours 00-23 */
#define MAX6900_REG_DT 3 /* day of month 00-31 */
#define MAX6900_REG_MO 4 /* month 01-12 */
#define MAX6900_REG_DW 5 /* day of week 1-7 */
#define MAX6900_REG_YR 6 /* year 00-99 */
#define MAX6900_REG_CT 7 /* control */
/* register 8 is undocumented */
#define MAX6900_REG_CENTURY 9 /* century */
#define MAX6900_REG_LEN 10
#define MAX6900_BURST_LEN 8 /* can burst r/w first 8 regs */
#define MAX6900_REG_CT_WP (1 << 7) /* Write Protect */
/*
* register read/write commands
*/
#define MAX6900_REG_CONTROL_WRITE 0x8e
#define MAX6900_REG_CENTURY_WRITE 0x92
#define MAX6900_REG_CENTURY_READ 0x93
#define MAX6900_REG_RESERVED_READ 0x96
#define MAX6900_REG_BURST_WRITE 0xbe
#define MAX6900_REG_BURST_READ 0xbf
#define MAX6900_IDLE_TIME_AFTER_WRITE 3 /* specification says 2.5 mS */
static struct i2c_driver max6900_driver;
/* Read all MAX6900 time registers into buf: a burst read of the first
 * MAX6900_BURST_LEN registers followed by a separate read of the century
 * register, issued as one four-message i2c_transfer.  Returns 0 on success
 * or -EIO if the transfer did not complete. */
static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf)
{
	u8 reg_burst_read[1] = { MAX6900_REG_BURST_READ };
	u8 reg_century_read[1] = { MAX6900_REG_CENTURY_READ };
	struct i2c_msg msgs[4] = {
		{
		 /* Select burst-read mode... */
		 .addr = client->addr,
		 .flags = 0, /* write */
		 .len = sizeof(reg_burst_read),
		 .buf = reg_burst_read}
		,
		{
		 /* ...then read the first MAX6900_BURST_LEN registers. */
		 .addr = client->addr,
		 .flags = I2C_M_RD,
		 .len = MAX6900_BURST_LEN,
		 .buf = buf}
		,
		{
		 /* Select the century register... */
		 .addr = client->addr,
		 .flags = 0, /* write */
		 .len = sizeof(reg_century_read),
		 .buf = reg_century_read}
		,
		{
		 /* ...and read it into its slot in buf. */
		 .addr = client->addr,
		 .flags = I2C_M_RD,
		 .len = sizeof(buf[MAX6900_REG_CENTURY]),
		 .buf = &buf[MAX6900_REG_CENTURY]
		 }
	};
	int rc;

	rc = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (rc != ARRAY_SIZE(msgs)) {
		dev_err(&client->dev, "%s: register read failed\n", __func__);
		return -EIO;
	}
	return 0;
}
/*
 * Write all MAX6900 time/date registers from @buf.
 *
 * Two separate i2c_transfer() calls are used (century first, then the
 * 8-register burst) because the chip requires an idle period after every
 * write, and because the burst write also sets the write-protect bit --
 * so the century byte must land before the chip is re-protected.
 *
 * @client: I2C client for the RTC
 * @buf:    register image of at least MAX6900_REG_LEN bytes
 *
 * Returns 0 on success, -EIO on any transfer failure.
 */
static int max6900_i2c_write_regs(struct i2c_client *client, u8 const *buf)
{
	/* command byte followed by the century value */
	u8 i2c_century_buf[1 + 1] = { MAX6900_REG_CENTURY_WRITE };
	struct i2c_msg century_msgs[1] = {
		{
		 .addr = client->addr,
		 .flags = 0,	/* write */
		 .len = sizeof(i2c_century_buf),
		 .buf = i2c_century_buf}
	};
	/* command byte followed by the 8 burst registers */
	u8 i2c_burst_buf[MAX6900_BURST_LEN + 1] = { MAX6900_REG_BURST_WRITE };
	struct i2c_msg burst_msgs[1] = {
		{
		 .addr = client->addr,
		 .flags = 0,	/* write */
		 .len = sizeof(i2c_burst_buf),
		 .buf = i2c_burst_buf}
	};
	int rc;

	/*
	 * We have to make separate calls to i2c_transfer because of
	 * the need to delay after each write to the chip. Also,
	 * we write the century byte first, since we set the write-protect
	 * bit as part of the burst write.
	 */
	i2c_century_buf[1] = buf[MAX6900_REG_CENTURY];

	rc = i2c_transfer(client->adapter, century_msgs,
			  ARRAY_SIZE(century_msgs));
	if (rc != ARRAY_SIZE(century_msgs))
		goto write_failed;

	msleep(MAX6900_IDLE_TIME_AFTER_WRITE);

	memcpy(&i2c_burst_buf[1], buf, MAX6900_BURST_LEN);

	rc = i2c_transfer(client->adapter, burst_msgs, ARRAY_SIZE(burst_msgs));
	if (rc != ARRAY_SIZE(burst_msgs))
		goto write_failed;
	msleep(MAX6900_IDLE_TIME_AFTER_WRITE);

	return 0;

write_failed:
	dev_err(&client->dev, "%s: register write failed\n", __func__);
	return -EIO;
}
/*
 * Read the current time from the chip and convert the BCD register
 * values into a struct rtc_time.
 *
 * tm_year is years since 1900, rebuilt from the two-digit year plus
 * the century register; tm_mon is 0-based per rtc_time convention.
 *
 * Returns 0 if the resulting time is valid, otherwise a negative errno
 * from the register read or from rtc_valid_tm().
 */
static int max6900_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
{
	int rc;
	u8 regs[MAX6900_REG_LEN];

	rc = max6900_i2c_read_regs(client, regs);
	if (rc < 0)
		return rc;

	tm->tm_sec = bcd2bin(regs[MAX6900_REG_SC]);
	tm->tm_min = bcd2bin(regs[MAX6900_REG_MN]);
	/* mask the 12/24-hour control bits out of the hour register */
	tm->tm_hour = bcd2bin(regs[MAX6900_REG_HR] & 0x3f);
	tm->tm_mday = bcd2bin(regs[MAX6900_REG_DT]);
	tm->tm_mon = bcd2bin(regs[MAX6900_REG_MO]) - 1;
	tm->tm_year = bcd2bin(regs[MAX6900_REG_YR]) +
		      bcd2bin(regs[MAX6900_REG_CENTURY]) * 100 - 1900;
	tm->tm_wday = bcd2bin(regs[MAX6900_REG_DW]);

	return rtc_valid_tm(tm);
}
/*
 * Clear the write-protect bit in the MAX6900 control register so that
 * subsequent time/date writes are accepted by the chip.
 *
 * Returns 0 on success, -EIO if the SMBus write fails.
 */
static int max6900_i2c_clear_write_protect(struct i2c_client *client)
{
	int ret;

	ret = i2c_smbus_write_byte_data(client, MAX6900_REG_CONTROL_WRITE, 0);
	if (ret < 0) {
		dev_err(&client->dev, "%s: control register write failed\n",
			__func__);
		return -EIO;
	}
	return 0;
}
/*
 * Program the chip with the time in @tm.
 *
 * The write-protect bit is cleared first so the burst write is
 * accepted; the control byte written back as part of the burst
 * re-enables write protection.
 *
 * Returns 0 on success or a negative errno.
 */
static int
max6900_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
{
	u8 regs[MAX6900_REG_LEN];
	int rc;

	rc = max6900_i2c_clear_write_protect(client);
	if (rc < 0)
		return rc;

	regs[MAX6900_REG_SC] = bin2bcd(tm->tm_sec);
	regs[MAX6900_REG_MN] = bin2bcd(tm->tm_min);
	regs[MAX6900_REG_HR] = bin2bcd(tm->tm_hour);
	regs[MAX6900_REG_DT] = bin2bcd(tm->tm_mday);
	/* rtc_time months are 0-based, the chip's are 1-based */
	regs[MAX6900_REG_MO] = bin2bcd(tm->tm_mon + 1);
	regs[MAX6900_REG_DW] = bin2bcd(tm->tm_wday);
	regs[MAX6900_REG_YR] = bin2bcd(tm->tm_year % 100);
	regs[MAX6900_REG_CENTURY] = bin2bcd((tm->tm_year + 1900) / 100);
	/* set write protect */
	regs[MAX6900_REG_CT] = MAX6900_REG_CT_WP;

	rc = max6900_i2c_write_regs(client, regs);
	if (rc < 0)
		return rc;

	return 0;
}
/* RTC class callback: read the time from the device's i2c_client. */
static int max6900_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	return max6900_i2c_read_time(to_i2c_client(dev), tm);
}

/* RTC class callback: set the time via the device's i2c_client. */
static int max6900_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	return max6900_i2c_set_time(to_i2c_client(dev), tm);
}

/* Operations exported to the RTC class core. */
static const struct rtc_class_ops max6900_rtc_ops = {
	.read_time = max6900_rtc_read_time,
	.set_time = max6900_rtc_set_time,
};
/*
 * Probe: verify the adapter supports plain I2C transfers (required by
 * the multi-message register accessors) and register the RTC device.
 * devm_* registration means no explicit remove handler is needed.
 */
static int
max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct rtc_device *rtc;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -ENODEV;

	dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");

	rtc = devm_rtc_device_register(&client->dev, max6900_driver.driver.name,
					&max6900_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	i2c_set_clientdata(client, rtc);

	return 0;
}
/* Devices handled by this driver (matched by name "max6900"). */
static struct i2c_device_id max6900_id[] = {
	{ "max6900", 0 },
	{ }
};

static struct i2c_driver max6900_driver = {
	.driver = {
		   .name = "rtc-max6900",
		   },
	.probe = max6900_probe,
	.id_table = max6900_id,
};

/* Standard boilerplate: registers/unregisters the driver on load/unload. */
module_i2c_driver(max6900_driver);

MODULE_DESCRIPTION("Maxim MAX6900 RTC driver");
MODULE_AUTHOR("Dale Farnsworth <dale@farnsworth.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
zjh3123629/linux-4.5.1 | drivers/staging/rtl8192e/rtl819x_HTProc.c | 410 | 26628 | /******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
#include "rtllib.h"
#include "rtl819x_HT.h"
/* MCS bitmap filter allowing every rate the hardware can use. */
u8 MCS_FILTER_ALL[16] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};

/* MCS bitmap filter restricted to single-spatial-stream rates. */
u8 MCS_FILTER_1SS[16] = {
	0xff, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
;

/*
 * Data rate lookup table, indexed [is40MHz][isShortGI][MCS index].
 * Values appear to be in units of 100 kbps (e.g. 13 -> 1.3 Mbps for
 * MCS0 at 20 MHz/long GI) -- TODO confirm against the rate users.
 */
u16 MCS_DATA_RATE[2][2][77] = {
	{{13, 26, 39, 52, 78, 104, 117, 130, 26, 52, 78, 104, 156, 208, 234,
	 260, 39, 78, 117, 234, 312, 351, 390, 52, 104, 156, 208, 312, 416,
	 468, 520, 0, 78, 104, 130, 117, 156, 195, 104, 130, 130, 156, 182,
	 182, 208, 156, 195, 195, 234, 273, 273, 312, 130, 156, 181, 156,
	 181, 208, 234, 208, 234, 260, 260, 286, 195, 234, 273, 234, 273,
	 312, 351, 312, 351, 390, 390, 429},
	{14, 29, 43, 58, 87, 116, 130, 144, 29, 58, 87, 116, 173, 231, 260, 289,
	 43, 87, 130, 173, 260, 347, 390, 433, 58, 116, 173, 231, 347, 462, 520,
	 578, 0, 87, 116, 144, 130, 173, 217, 116, 144, 144, 173, 202, 202, 231,
	 173, 217, 217, 260, 303, 303, 347, 144, 173, 202, 173, 202, 231, 260,
	 231, 260, 289, 289, 318, 217, 260, 303, 260, 303, 347, 390, 347, 390,
	 433, 433, 477} },
	{{27, 54, 81, 108, 162, 216, 243, 270, 54, 108, 162, 216, 324, 432, 486,
	 540, 81, 162, 243, 324, 486, 648, 729, 810, 108, 216, 324, 432, 648,
	 864, 972, 1080, 12, 162, 216, 270, 243, 324, 405, 216, 270, 270, 324,
	 378, 378, 432, 324, 405, 405, 486, 567, 567, 648, 270, 324, 378, 324,
	 378, 432, 486, 432, 486, 540, 540, 594, 405, 486, 567, 486, 567, 648,
	 729, 648, 729, 810, 810, 891},
	{30, 60, 90, 120, 180, 240, 270, 300, 60, 120, 180, 240, 360, 480, 540,
	 600, 90, 180, 270, 360, 540, 720, 810, 900, 120, 240, 360, 480, 720,
	 960, 1080, 1200, 13, 180, 240, 300, 270, 360, 450, 240, 300, 300, 360,
	 420, 420, 480, 360, 450, 450, 540, 630, 630, 720, 300, 360, 420, 360,
	 420, 480, 540, 480, 540, 600, 600, 660, 450, 540, 630, 540, 630, 720,
	 810, 720, 810, 900, 900, 990} }
};

/* BSSID OUI prefixes used to recognise AP chipset vendors for
 * interoperability (IOT) workarounds.
 */
static u8 UNKNOWN_BORADCOM[3] = {0x00, 0x14, 0xbf};
static u8 LINKSYSWRT330_LINKSYSWRT300_BROADCOM[3] = {0x00, 0x1a, 0x70};
static u8 LINKSYSWRT350_LINKSYSWRT150_BROADCOM[3] = {0x00, 0x1d, 0x7e};
static u8 BELKINF5D8233V1_RALINK[3] = {0x00, 0x17, 0x3f};
static u8 BELKINF5D82334V3_RALINK[3] = {0x00, 0x1c, 0xdf};
static u8 PCI_RALINK[3] = {0x00, 0x90, 0xcc};
static u8 EDIMAX_RALINK[3] = {0x00, 0x0e, 0x2e};
static u8 AIRLINK_RALINK[3] = {0x00, 0x18, 0x02};
static u8 DLINK_ATHEROS_1[3] = {0x00, 0x1c, 0xf0};
static u8 DLINK_ATHEROS_2[3] = {0x00, 0x21, 0x91};
static u8 CISCO_BROADCOM[3] = {0x00, 0x17, 0x94};
static u8 LINKSYS_MARVELL_4400N[3] = {0x00, 0x14, 0xa4};
/*
 * Initialise the driver-side ("Reg") HT defaults on @ieee->pHTInfo.
 * Called before association; the "bCur*" negotiated values are derived
 * from these later in HTOnAssocRsp()/HTUseDefaultSetting().
 */
void HTUpdateDefaultSetting(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	pHTInfo->bAcceptAddbaReq = 1;

	pHTInfo->bRegShortGI20MHz = 1;
	pHTInfo->bRegShortGI40MHz = 1;

	pHTInfo->bRegBW40MHz = 1;

	/* The original if/else assigned a true value (1 vs. true) in both
	 * branches, so CCK support is unconditionally enabled.
	 */
	pHTInfo->bRegSuppCCK = 1;

	pHTInfo->nAMSDU_MaxSize = 7935UL;
	pHTInfo->bAMSDU_Support = 0;

	pHTInfo->bAMPDUEnable = 1;
	pHTInfo->AMPDU_Factor = 2;
	pHTInfo->MPDU_Density = 0;

	/* 3 == MIMO power save disabled.  The old remapping of the
	 * reserved value 2 to 3 was dead code immediately after this
	 * fixed assignment and has been removed.
	 */
	pHTInfo->SelfMimoPs = 3;

	ieee->bTxDisableRateFallBack = 0;
	ieee->bTxUseDriverAssingedRate = 0;

	ieee->bTxEnableFwCalcDur = 1;

	pHTInfo->bRegRT2RTAggregation = 1;

	pHTInfo->bRegRxReorderEnable = 1;
	pHTInfo->RxReorderWinSize = 64;
	pHTInfo->RxReorderPendingTime = 30;
}
/*
 * Map an MCS index to a data rate using the current negotiated channel
 * width and short-GI settings.  The top bit of @nMcsRate (the "is MCS"
 * marker) is masked off before the table lookup.
 */
static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	u8	is40MHz = (pHTInfo->bCurBW40MHz) ? 1 : 0;
	/* short GI flag is taken from whichever bandwidth is active */
	u8	isShortGI = (pHTInfo->bCurBW40MHz) ?
			    ((pHTInfo->bCurShortGI40MHz) ? 1 : 0) :
			    ((pHTInfo->bCurShortGI20MHz) ? 1 : 0);
	return MCS_DATA_RATE[is40MHz][isShortGI][(nMcsRate & 0x7f)];
}
/*
 * Translate a hardware TX rate index into a data rate value.
 *
 * Indices 0-11 are legacy CCK/OFDM rates and use a direct table.
 * Indices 0x10-0x4f encode an MCS (low nibble) plus bandwidth and
 * short-GI in the high nibble ranges decoded below.  Indices outside
 * those ranges fall through with 20 MHz/long-GI defaults -- presumably
 * unreachable; verify against callers.
 */
u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate)
{
	u16 CCKOFDMRate[12] = {0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18,
			       0x24, 0x30, 0x48, 0x60, 0x6c};
	u8 is40MHz = 0;
	u8 isShortGI = 0;

	if (nDataRate < 12)
		return CCKOFDMRate[nDataRate];
	if (nDataRate >= 0x10 && nDataRate <= 0x1f) {
		is40MHz = 0;
		isShortGI = 0;
	} else if (nDataRate >= 0x20 && nDataRate <= 0x2f) {
		is40MHz = 1;
		isShortGI = 0;
	} else if (nDataRate >= 0x30 && nDataRate <= 0x3f) {
		is40MHz = 0;
		isShortGI = 1;
	} else if (nDataRate >= 0x40 && nDataRate <= 0x4f) {
		is40MHz = 1;
		isShortGI = 1;
	}
	/* low nibble selects the MCS within the decoded BW/GI table */
	return MCS_DATA_RATE[is40MHz][isShortGI][nDataRate&0xf];
}
/*
 * Decide whether the currently associated AP is one that requires the
 * "half-N" workaround.  The AP is matched either by its BSSID OUI or by
 * the vendor capability flags parsed from its beacon/probe response.
 */
bool IsHTHalfNmodeAPs(struct rtllib_device *ieee)
{
	struct rtllib_network *net = &ieee->current_network;

	/* Ralink-based APs, by OUI or vendor IE. */
	if (!memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) ||
	    !memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) ||
	    !memcmp(net->bssid, PCI_RALINK, 3) ||
	    !memcmp(net->bssid, EDIMAX_RALINK, 3) ||
	    !memcmp(net->bssid, AIRLINK_RALINK, 3) ||
	    net->ralink_cap_exist)
		return true;

	/* Broadcom-based APs. */
	if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) ||
	    !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) ||
	    !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3) ||
	    net->broadcom_cap_exist)
		return true;

	/* Realtek-to-Realtek aggregation peers. */
	return net->bssht.bdRT2RTAggregation ? true : false;
}
/*
 * Classify the associated AP's chipset vendor into pHTInfo->IOTPeer so
 * that vendor-specific interoperability quirks can be applied.
 * Detection uses (in priority order) the Realtek RT2RT vendor IE, the
 * vendor capability flags parsed from the beacon, and known BSSID OUIs.
 */
static void HTIOTPeerDetermine(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct rtllib_network *net = &ieee->current_network;

	if (net->bssht.bdRT2RTAggregation) {
		pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK;
		/* refine the Realtek classification from the RT2RT mode bits */
		if (net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_92SE)
			pHTInfo->IOTPeer = HT_IOT_PEER_REALTEK_92SE;
		if (net->bssht.RT2RT_HT_Mode & RT_HT_CAP_USE_SOFTAP)
			pHTInfo->IOTPeer = HT_IOT_PEER_92U_SOFTAP;
	} else if (net->broadcom_cap_exist)
		pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
	else if (!memcmp(net->bssid, UNKNOWN_BORADCOM, 3) ||
		 !memcmp(net->bssid, LINKSYSWRT330_LINKSYSWRT300_BROADCOM, 3) ||
		 !memcmp(net->bssid, LINKSYSWRT350_LINKSYSWRT150_BROADCOM, 3))
		pHTInfo->IOTPeer = HT_IOT_PEER_BROADCOM;
	else if ((memcmp(net->bssid, BELKINF5D8233V1_RALINK, 3) == 0) ||
		 (memcmp(net->bssid, BELKINF5D82334V3_RALINK, 3) == 0) ||
		 (memcmp(net->bssid, PCI_RALINK, 3) == 0) ||
		 (memcmp(net->bssid, EDIMAX_RALINK, 3) == 0) ||
		 (memcmp(net->bssid, AIRLINK_RALINK, 3) == 0) ||
		 net->ralink_cap_exist)
		pHTInfo->IOTPeer = HT_IOT_PEER_RALINK;
	else if ((net->atheros_cap_exist) ||
		 (memcmp(net->bssid, DLINK_ATHEROS_1, 3) == 0) ||
		 (memcmp(net->bssid, DLINK_ATHEROS_2, 3) == 0))
		pHTInfo->IOTPeer = HT_IOT_PEER_ATHEROS;
	else if ((memcmp(net->bssid, CISCO_BROADCOM, 3) == 0) ||
		  net->cisco_cap_exist)
		pHTInfo->IOTPeer = HT_IOT_PEER_CISCO;
	else if ((memcmp(net->bssid, LINKSYS_MARVELL_4400N, 3) == 0) ||
		  net->marvell_cap_exist)
		pHTInfo->IOTPeer = HT_IOT_PEER_MARVELL;
	else if (net->airgo_cap_exist)
		pHTInfo->IOTPeer = HT_IOT_PEER_AIRGO;
	else
		pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;

	netdev_dbg(ieee->dev, "IOTPEER: %x\n", pHTInfo->IOTPeer);
}
/*
 * IOT quirk predicates.  Each one answers "does the current peer need
 * this workaround?".  The first four are stubs: no peer currently
 * requires the corresponding restriction.
 */
static u8 HTIOTActIsDisableMCS14(struct rtllib_device *ieee, u8 *PeerMacAddr)
{
	/* No peer needs MCS14 disabled. */
	return 0;
}

static bool HTIOTActIsDisableMCS15(struct rtllib_device *ieee)
{
	/* No peer needs MCS15 disabled. */
	return false;
}

static bool HTIOTActIsDisableMCSTwoSpatialStream(struct rtllib_device *ieee)
{
	/* No peer needs all 2SS rates disabled. */
	return false;
}

static u8 HTIOTActIsDisableEDCATurbo(struct rtllib_device *ieee,
				     u8 *PeerMacAddr)
{
	/* EDCA turbo stays enabled for every peer. */
	return false;
}

static u8 HTIOTActIsMgntUseCCK6M(struct rtllib_device *ieee,
				 struct rtllib_network *network)
{
	/* Broadcom APs want management frames sent at CCK/6M. */
	return (ieee->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) ? 1 : 0;
}

static u8 HTIOTActIsCCDFsync(struct rtllib_device *ieee)
{
	/* CDD f-sync workaround is only needed for Broadcom APs. */
	return (ieee->pHTInfo->IOTPeer == HT_IOT_PEER_BROADCOM) ? 1 : 0;
}
/*
 * Set the rate-adaptation quirk flags (IOTRaFunc) for the current peer.
 * @bPeerRx2ss: true if the peer advertises 2-spatial-stream RX support.
 */
static void HTIOTActDetermineRaFunc(struct rtllib_device *ieee, bool bPeerRx2ss)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	/* start from a clean slate, keeping only the disable-all mask bits */
	pHTInfo->IOTRaFunc &= HT_IOT_RAFUNC_DISABLE_ALL;

	/* single-stream Ralink peers get the 1R rate-adaptation profile */
	if (pHTInfo->IOTPeer == HT_IOT_PEER_RALINK && !bPeerRx2ss)
		pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_PEER_1R;

	if (pHTInfo->IOTAction & HT_IOT_ACT_AMSDU_ENABLE)
		pHTInfo->IOTRaFunc |= HT_IOT_RAFUNC_TX_AMSDU;
}
/* Forget all interoperability state learned from the previous peer. */
void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo)
{
	pHTInfo->IOTPeer = HT_IOT_PEER_UNKNOWN;
	pHTInfo->IOTAction = 0;
	pHTInfo->IOTRaFunc = 0;
}
/*
 * Build the HT Capabilities information element into @posHTCap.
 *
 * For EWC (pre-11n draft) peers the element is wrapped in the 4-byte
 * EWC vendor prefix; otherwise the plain IEEE layout is used.  *len is
 * set to the total element length.  When @bAssoc is set, several
 * IOT-driven restrictions are applied to the advertised MCS set.
 *
 * @IsEncrypt: non-zero when the link is encrypted; tightens the AMPDU
 *             density/factor advertised to the peer.
 */
void HTConstructCapabilityElement(struct rtllib_device *ieee, u8 *posHTCap,
				  u8 *len, u8 IsEncrypt, bool bAssoc)
{
	struct rt_hi_throughput *pHT = ieee->pHTInfo;
	struct ht_capab_ele *pCapELE = NULL;

	if ((posHTCap == NULL) || (pHT == NULL)) {
		netdev_warn(ieee->dev,
			    "%s(): posHTCap and pHTInfo are null\n", __func__);
		return;
	}
	memset(posHTCap, 0, *len);

	if ((bAssoc) && (pHT->ePeerHTSpecVer == HT_SPEC_VER_EWC)) {
		/* EWC vendor OUI + type prefix before the capability body */
		u8	EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};

		memcpy(posHTCap, EWC11NHTCap, sizeof(EWC11NHTCap));
		pCapELE = (struct ht_capab_ele *)&(posHTCap[4]);
		*len = 30 + 2;
	} else {
		pCapELE = (struct ht_capab_ele *)posHTCap;
		*len = 26 + 2;
	}

	pCapELE->AdvCoding		= 0;
	/* half-N APs force 20 MHz operation */
	if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
		pCapELE->ChlWidth = 0;
	else
		pCapELE->ChlWidth = (pHT->bRegBW40MHz ? 1 : 0);

	pCapELE->MimoPwrSave		= pHT->SelfMimoPs;
	pCapELE->GreenField		= 0;
	pCapELE->ShortGI20Mhz		= 1;
	pCapELE->ShortGI40Mhz		= 1;

	pCapELE->TxSTBC			= 1;
	pCapELE->RxSTBC			= 0;
	pCapELE->DelayBA		= 0;
	pCapELE->MaxAMSDUSize = (MAX_RECEIVE_BUFFER_SIZE >= 7935) ? 1 : 0;
	pCapELE->DssCCk = ((pHT->bRegBW40MHz) ? (pHT->bRegSuppCCK ? 1 : 0) : 0);
	pCapELE->PSMP = 0;
	pCapELE->LSigTxopProtect = 0;

	netdev_dbg(ieee->dev,
		   "TX HT cap/info ele BW=%d MaxAMSDUSize:%d DssCCk:%d\n",
		   pCapELE->ChlWidth, pCapELE->MaxAMSDUSize, pCapELE->DssCCk);

	if (IsEncrypt) {
		pCapELE->MPDUDensity	= 7;
		pCapELE->MaxRxAMPDUFactor	= 2;
	} else {
		pCapELE->MaxRxAMPDUFactor	= 3;
		pCapELE->MPDUDensity	= 0;
	}

	memcpy(pCapELE->MCS, ieee->Regdot11HTOperationalRateSet, 16);
	memset(&pCapELE->ExtHTCapInfo, 0, 2);
	memset(pCapELE->TxBFCap, 0, 4);

	pCapELE->ASCap = 0;

	if (bAssoc) {
		/* trim the advertised MCS set per IOT workarounds */
		if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS15)
			pCapELE->MCS[1] &= 0x7f;

		if (pHT->IOTAction & HT_IOT_ACT_DISABLE_MCS14)
			pCapELE->MCS[1] &= 0xbf;

		if (pHT->IOTAction & HT_IOT_ACT_DISABLE_ALL_2SS)
			pCapELE->MCS[1] &= 0x00;

		if (pHT->IOTAction & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
			pCapELE->ShortGI40Mhz		= 0;

		if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
			pCapELE->ChlWidth = 0;
			pCapELE->MCS[1] = 0;
		}
	}
}
/*
 * Build the HT Information element into @posHTInfo.
 *
 * Only meaningful when we are the BSS owner (ad-hoc or master mode);
 * in station mode the element length is set to 0 and nothing is built.
 * @IsEncrypt is currently unused in the element body.
 */
void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
			    u8 *len, u8 IsEncrypt)
{
	struct rt_hi_throughput *pHT = ieee->pHTInfo;
	struct ht_info_ele *pHTInfoEle = (struct ht_info_ele *)posHTInfo;

	if ((posHTInfo == NULL) || (pHTInfoEle == NULL)) {
		netdev_warn(ieee->dev,
			    "%s(): posHTInfo and pHTInfoEle are null\n",
			    __func__);
		return;
	}

	memset(posHTInfo, 0, *len);
	if ((ieee->iw_mode == IW_MODE_ADHOC) ||
	    (ieee->iw_mode == IW_MODE_MASTER)) {
		pHTInfoEle->ControlChl	= ieee->current_network.channel;
		/* pick the secondary channel: channels <= 6 extend upward,
		 * higher channels extend downward; none if 40 MHz disabled
		 */
		pHTInfoEle->ExtChlOffset = ((pHT->bRegBW40MHz == false) ?
					    HT_EXTCHNL_OFFSET_NO_EXT :
					    (ieee->current_network.channel <= 6)
					    ? HT_EXTCHNL_OFFSET_UPPER :
					    HT_EXTCHNL_OFFSET_LOWER);
		pHTInfoEle->RecommemdedTxWidth	= pHT->bRegBW40MHz;
		pHTInfoEle->RIFS	= 0;
		pHTInfoEle->PSMPAccessOnly = 0;
		pHTInfoEle->SrvIntGranularity = 0;
		pHTInfoEle->OptMode	= pHT->CurrentOpMode;
		pHTInfoEle->NonGFDevPresent = 0;
		pHTInfoEle->DualBeacon	= 0;
		pHTInfoEle->SecondaryBeacon = 0;
		pHTInfoEle->LSigTxopProtectFull = 0;
		pHTInfoEle->PcoActive	= 0;
		pHTInfoEle->PcoPhase	= 0;

		memset(pHTInfoEle->BasicMSC, 0, 16);

		*len = 22 + 2;

	} else {
		*len = 0;
	}
}
/*
 * Build the Realtek proprietary RT2RT aggregation vendor element
 * (OUI 00:e0:4c, type 0x02) into @posRT2RTAgg and set *len to its
 * total length.  The wake-on-WLAN capability bit is OR'ed into the
 * last byte when remote wakeup is supported.
 */
void HTConstructRT2RTAggElement(struct rtllib_device *ieee, u8 *posRT2RTAgg,
				u8 *len)
{
	if (posRT2RTAgg == NULL) {
		netdev_warn(ieee->dev, "%s(): posRT2RTAgg is null\n", __func__);
		return;
	}
	memset(posRT2RTAgg, 0, *len);
	*posRT2RTAgg++ = 0x00;
	*posRT2RTAgg++ = 0xe0;
	*posRT2RTAgg++ = 0x4c;
	*posRT2RTAgg++ = 0x02;
	*posRT2RTAgg++ = 0x01;

	*posRT2RTAgg = 0x30;

	if (ieee->bSupportRemoteWakeUp)
		*posRT2RTAgg |= RT_HT_CAP_USE_WOW;

	*len = 6 + 2;
}
/*
 * Restrict the MCS bitmap in @pOperateMCS to what the current wireless
 * mode allows: legacy modes get no MCS rates at all; 11n modes are
 * masked down to the rate-adaptation subsets for 1SS/2SS plus MCS32.
 *
 * Returns true on success, false if @pOperateMCS is NULL.
 */
static u8 HT_PickMCSRate(struct rtllib_device *ieee, u8 *pOperateMCS)
{
	u8	i;

	if (pOperateMCS == NULL) {
		netdev_warn(ieee->dev, "%s(): pOperateMCS is null\n", __func__);
		return false;
	}

	switch (ieee->mode) {
	case IEEE_A:
	case IEEE_B:
	case IEEE_G:
		/* legacy modes: no HT rates */
		for (i = 0; i <= 15; i++)
			pOperateMCS[i] = 0;
		break;

	case IEEE_N_24G:
	case IEEE_N_5G:
		pOperateMCS[0] &= RATE_ADPT_1SS_MASK;
		pOperateMCS[1] &= RATE_ADPT_2SS_MASK;
		pOperateMCS[3] &= RATE_ADPT_MCS32_MASK;
		break;

	default:
		break;
	}

	return true;
}
/*
 * Find the highest-throughput MCS present in @pMCSRateSet after
 * applying @pMCSFilter (both 16-byte bitmaps, one bit per MCS).
 *
 * "Highest" is decided by looking the candidate up in the data-rate
 * table for the current bandwidth/GI settings, not by raw MCS index.
 * Returns the winning MCS index with bit 7 set (the "is MCS" marker),
 * or false (0) if the filtered set is empty or an argument is NULL.
 */
u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
		       u8 *pMCSFilter)
{
	u8		i, j;
	u8		bitMap;
	u8		mcsRate = 0;
	u8		availableMcsRate[16];

	if (pMCSRateSet == NULL || pMCSFilter == NULL) {
		netdev_warn(ieee->dev,
			    "%s(): pMCSRateSet and pMCSFilter are null\n",
			    __func__);
		return false;
	}
	for (i = 0; i < 16; i++)
		availableMcsRate[i] = pMCSRateSet[i] & pMCSFilter[i];

	/* bail out early if no MCS survived the filter */
	for (i = 0; i < 16; i++) {
		if (availableMcsRate[i] != 0)
			break;
	}
	if (i == 16)
		return false;

	for (i = 0; i < 16; i++) {
		if (availableMcsRate[i] != 0) {
			bitMap = availableMcsRate[i];
			for (j = 0; j < 8; j++) {
				if ((bitMap%2) != 0) {
					/* keep the MCS with the higher rate */
					if (HTMcsToDataRate(ieee, (8*i+j)) >
					    HTMcsToDataRate(ieee, mcsRate))
						mcsRate = (8*i+j);
				}
				bitMap >>= 1;
			}
		}
	}
	return mcsRate | 0x80;
}
/*
 * Intersect the peer's supported MCS set with our own TX capability,
 * then restrict it by wireless mode (HT_PickMCSRate) and, for half-N
 * APs, drop everything above the first spatial stream.  The result is
 * written into @pOperateMCS.  Always returns true.
 */
static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
			  u8 *pOperateMCS)
{
	u8 i;

	for (i = 0; i <= 15; i++)
		pOperateMCS[i] = ieee->Regdot11TxHTOperationalRateSet[i] &
				 pSupportMCS[i];

	HT_PickMCSRate(ieee, pOperateMCS);

	if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
		pOperateMCS[1] = 0;

	/* only the first two MCS bytes (MCS0-15) are ever used */
	for (i = 2; i <= 15; i++)
		pOperateMCS[i] = 0;

	return true;
}
void HTSetConnectBwMode(struct rtllib_device *ieee,
enum ht_channel_width Bandwidth,
enum ht_extchnl_offset Offset);
/*
 * Negotiate the operational HT parameters after a successful
 * association, combining our registry ("Reg") settings with the peer's
 * HT Capability / HT Information elements saved in PeerHTCapBuf /
 * PeerHTInfoBuf.  Results land in the "bCur*" / "Current*" fields of
 * pHTInfo and in the dot11 operational rate set.
 */
void HTOnAssocRsp(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct ht_capab_ele *pPeerHTCap = NULL;
	struct ht_info_ele *pPeerHTInfo = NULL;
	u16	nMaxAMSDUSize = 0;
	u8	*pMcsFilter = NULL;

	static u8 EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};
	static u8 EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34};

	if (pHTInfo->bCurrentHTSupport == false) {
		netdev_warn(ieee->dev, "%s(): HT_DISABLE\n", __func__);
		return;
	}
	netdev_dbg(ieee->dev, "%s(): HT_ENABLE\n", __func__);

	/* EWC-style elements carry a 4-byte vendor prefix to skip */
	if (!memcmp(pHTInfo->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap)))
		pPeerHTCap = (struct ht_capab_ele *)(&pHTInfo->PeerHTCapBuf[4]);
	else
		pPeerHTCap = (struct ht_capab_ele *)(pHTInfo->PeerHTCapBuf);

	if (!memcmp(pHTInfo->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
		pPeerHTInfo = (struct ht_info_ele *)
			     (&pHTInfo->PeerHTInfoBuf[4]);
	else
		pPeerHTInfo = (struct ht_info_ele *)(pHTInfo->PeerHTInfoBuf);

#ifdef VERBOSE_DEBUG
	print_hex_dump_bytes("HTOnAssocRsp(): ", DUMP_PREFIX_NONE,
			     pPeerHTCap, sizeof(struct ht_capab_ele));
#endif
	/* switch channel/bandwidth to what the peer advertises */
	HTSetConnectBwMode(ieee, (enum ht_channel_width)(pPeerHTCap->ChlWidth),
			   (enum ht_extchnl_offset)(pPeerHTInfo->ExtChlOffset));
	pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1) ?
				 true : false);

	/* each feature is enabled only if both sides support it */
	pHTInfo->bCurShortGI20MHz = ((pHTInfo->bRegShortGI20MHz) ?
				    ((pPeerHTCap->ShortGI20Mhz == 1) ?
				    true : false) : false);
	pHTInfo->bCurShortGI40MHz = ((pHTInfo->bRegShortGI40MHz) ?
				     ((pPeerHTCap->ShortGI40Mhz == 1) ?
				     true : false) : false);

	pHTInfo->bCurSuppCCK = ((pHTInfo->bRegSuppCCK) ?
			       ((pPeerHTCap->DssCCk == 1) ? true :
			       false) : false);

	pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;

	/* AMSDU cap: the smaller of ours and the peer's advertised max */
	nMaxAMSDUSize = (pPeerHTCap->MaxAMSDUSize == 0) ? 3839 : 7935;

	if (pHTInfo->nAMSDU_MaxSize > nMaxAMSDUSize)
		pHTInfo->nCurrent_AMSDU_MaxSize = nMaxAMSDUSize;
	else
		pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;

	pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
	/* WEP/TKIP + Atheros or unknown peers: AMPDU known to misbehave */
	if (ieee->rtllib_ap_sec_type &&
	    (ieee->rtllib_ap_sec_type(ieee)&(SEC_ALG_WEP|SEC_ALG_TKIP))) {
		if ((pHTInfo->IOTPeer == HT_IOT_PEER_ATHEROS) ||
		    (pHTInfo->IOTPeer == HT_IOT_PEER_UNKNOWN))
			pHTInfo->bCurrentAMPDUEnable = false;
	}

	/* AMPDU factor negotiation; Realtek peers get special handling */
	if (!pHTInfo->bRegRT2RTAggregation) {
		if (pHTInfo->AMPDU_Factor > pPeerHTCap->MaxRxAMPDUFactor)
			pHTInfo->CurrentAMPDUFactor =
				 pPeerHTCap->MaxRxAMPDUFactor;
		else
			pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;

	} else {
		if (ieee->current_network.bssht.bdRT2RTAggregation) {
			if (ieee->pairwise_key_type != KEY_TYPE_NA)
				pHTInfo->CurrentAMPDUFactor =
						 pPeerHTCap->MaxRxAMPDUFactor;
			else
				pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_64K;
		} else {
			if (pPeerHTCap->MaxRxAMPDUFactor < HT_AGG_SIZE_32K)
				pHTInfo->CurrentAMPDUFactor =
						 pPeerHTCap->MaxRxAMPDUFactor;
			else
				pHTInfo->CurrentAMPDUFactor = HT_AGG_SIZE_32K;
		}
	}
	/* MPDU density: take the stricter (larger) of the two values */
	if (pHTInfo->MPDU_Density > pPeerHTCap->MPDUDensity)
		pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
	else
		pHTInfo->CurrentMPDUDensity = pPeerHTCap->MPDUDensity;
	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_USE_AMSDU_8K) {
		pHTInfo->bCurrentAMPDUEnable = false;
		pHTInfo->ForcedAMSDUMode = HT_AGG_FORCE_ENABLE;
		pHTInfo->ForcedAMSDUMaxSize = 7935;
	}
	pHTInfo->bCurRxReorderEnable = pHTInfo->bRegRxReorderEnable;

	/* a peer advertising no 1SS rates is treated as supporting all */
	if (pPeerHTCap->MCS[0] == 0)
		pPeerHTCap->MCS[0] = 0xff;

	HTIOTActDetermineRaFunc(ieee, ((pPeerHTCap->MCS[1]) != 0));

	HTFilterMCSRate(ieee, pPeerHTCap->MCS, ieee->dot11HTOperationalRateSet);

	/* static MIMO power save peers are limited to 1SS rates */
	pHTInfo->PeerMimoPs = pPeerHTCap->MimoPwrSave;
	if (pHTInfo->PeerMimoPs == MIMO_PS_STATIC)
		pMcsFilter = MCS_FILTER_1SS;
	else
		pMcsFilter = MCS_FILTER_ALL;
	ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
						       ieee->dot11HTOperationalRateSet, pMcsFilter);
	ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;

	pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
}
/*
 * Reset all negotiated ("current") HT state and peer buffers, e.g. at
 * link setup or teardown.  The "Reg" configuration fields are left
 * untouched; current values fall back to the Reg defaults.
 */
void HTInitializeHTInfo(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	netdev_vdbg(ieee->dev, "%s()\n", __func__);
	pHTInfo->bCurrentHTSupport = false;

	pHTInfo->bCurBW40MHz = false;
	pHTInfo->bCurTxBW40MHz = false;

	pHTInfo->bCurShortGI20MHz = false;
	pHTInfo->bCurShortGI40MHz = false;
	pHTInfo->bForcedShortGI = false;

	pHTInfo->bCurSuppCCK = true;

	pHTInfo->bCurrent_AMSDU_Support = false;
	pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;
	pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;
	pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;

	/* clear cached self and peer HT elements */
	memset((void *)(&(pHTInfo->SelfHTCap)), 0,
	       sizeof(pHTInfo->SelfHTCap));
	memset((void *)(&(pHTInfo->SelfHTInfo)), 0,
	       sizeof(pHTInfo->SelfHTInfo));
	memset((void *)(&(pHTInfo->PeerHTCapBuf)), 0,
	       sizeof(pHTInfo->PeerHTCapBuf));
	memset((void *)(&(pHTInfo->PeerHTInfoBuf)), 0,
	       sizeof(pHTInfo->PeerHTInfoBuf));

	pHTInfo->bSwBwInProgress = false;

	pHTInfo->ePeerHTSpecVer = HT_SPEC_VER_IEEE;

	pHTInfo->bCurrentRT2RTAggregation = false;
	pHTInfo->bCurrentRT2RTLongSlotTime = false;
	pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;

	pHTInfo->IOTPeer = 0;
	pHTInfo->IOTAction = 0;
	pHTInfo->IOTRaFunc = 0;

	{
		/* default supported-rate bitmap: MCS0-15 plus MCS32 */
		u8 *RegHTSuppRateSets = &(ieee->RegHTSuppRateSet[0]);

		RegHTSuppRateSets[0] = 0xFF;
		RegHTSuppRateSets[1] = 0xFF;
		RegHTSuppRateSets[4] = 0x01;
	}
}
/* Reset a per-BSS HT descriptor to the "no HT support" state. */
void HTInitializeBssDesc(struct bss_ht *pBssHT)
{
	pBssHT->bdSupportHT = false;
	pBssHT->bdHTCapLen = 0;
	pBssHT->bdHTInfoLen = 0;
	memset(pBssHT->bdHTCapBuf, 0, sizeof(pBssHT->bdHTCapBuf));
	memset(pBssHT->bdHTInfoBuf, 0, sizeof(pBssHT->bdHTInfoBuf));

	pBssHT->bdHTSpecVer = HT_SPEC_VER_IEEE;

	pBssHT->bdRT2RTAggregation = false;
	pBssHT->bdRT2RTLongSlotTime = false;
	pBssHT->RT2RT_HT_Mode = (enum rt_ht_capability)0;
}
/*
 * Cache the target network's HT elements into pHTInfo and recompute the
 * IOT (interoperability) action flags for that peer.  Called when a new
 * network is selected; if the network has no HT support, all HT and
 * IOT state is cleared instead.
 */
void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
				   struct rtllib_network *pNetwork)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	u8	bIOTAction = 0;

	netdev_vdbg(ieee->dev, "%s()\n", __func__);
	/* unmark bEnableHT flag here is the same reason why unmarked in
	 * function rtllib_softmac_new_net. WB 2008.09.10
	 */
	if (pNetwork->bssht.bdSupportHT) {
		pHTInfo->bCurrentHTSupport = true;
		pHTInfo->ePeerHTSpecVer = pNetwork->bssht.bdHTSpecVer;

		/* copy the peer IEs only if they fit our buffers */
		if (pNetwork->bssht.bdHTCapLen > 0 &&
		    pNetwork->bssht.bdHTCapLen <= sizeof(pHTInfo->PeerHTCapBuf))
			memcpy(pHTInfo->PeerHTCapBuf,
			       pNetwork->bssht.bdHTCapBuf,
			       pNetwork->bssht.bdHTCapLen);

		if (pNetwork->bssht.bdHTInfoLen > 0 &&
		    pNetwork->bssht.bdHTInfoLen <=
		    sizeof(pHTInfo->PeerHTInfoBuf))
			memcpy(pHTInfo->PeerHTInfoBuf,
			       pNetwork->bssht.bdHTInfoBuf,
			       pNetwork->bssht.bdHTInfoLen);

		if (pHTInfo->bRegRT2RTAggregation) {
			pHTInfo->bCurrentRT2RTAggregation =
				 pNetwork->bssht.bdRT2RTAggregation;
			pHTInfo->bCurrentRT2RTLongSlotTime =
				 pNetwork->bssht.bdRT2RTLongSlotTime;
			pHTInfo->RT2RT_HT_Mode = pNetwork->bssht.RT2RT_HT_Mode;
		} else {
			pHTInfo->bCurrentRT2RTAggregation = false;
			pHTInfo->bCurrentRT2RTLongSlotTime = false;
			pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;
		}

		HTIOTPeerDetermine(ieee);

		/* rebuild the quirk mask from the per-quirk predicates */
		pHTInfo->IOTAction = 0;
		bIOTAction = HTIOTActIsDisableMCS14(ieee, pNetwork->bssid);
		if (bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS14;

		bIOTAction = HTIOTActIsDisableMCS15(ieee);
		if (bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_MCS15;

		bIOTAction = HTIOTActIsDisableMCSTwoSpatialStream(ieee);
		if (bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_ALL_2SS;

		bIOTAction = HTIOTActIsDisableEDCATurbo(ieee, pNetwork->bssid);
		if (bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_DISABLE_EDCA_TURBO;

		bIOTAction = HTIOTActIsMgntUseCCK6M(ieee, pNetwork);
		if (bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_MGNT_USE_CCK_6M;
		bIOTAction = HTIOTActIsCCDFsync(ieee);
		if (bIOTAction)
			pHTInfo->IOTAction |= HT_IOT_ACT_CDD_FSYNC;
	} else {
		pHTInfo->bCurrentHTSupport = false;
		pHTInfo->bCurrentRT2RTAggregation = false;
		pHTInfo->bCurrentRT2RTLongSlotTime = false;
		pHTInfo->RT2RT_HT_Mode = (enum rt_ht_capability)0;

		pHTInfo->IOTAction = 0;
		pHTInfo->IOTRaFunc = 0;
	}
}
/*
 * Refresh the operating mode from the peer's HT Information element
 * (e.g. on a beacon update) while associated to an HT network.
 */
void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
				     struct rtllib_network *pNetwork)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct ht_info_ele *pPeerHTInfo =
		 (struct ht_info_ele *)pNetwork->bssht.bdHTInfoBuf;

	if (pHTInfo->bCurrentHTSupport) {
		/* only valid when the network actually carried an HT Info IE */
		if (pNetwork->bssht.bdHTInfoLen != 0)
			pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
	}
}
EXPORT_SYMBOL(HT_update_self_and_peer_setting);
/*
 * Adopt the registry HT defaults as the current operational settings,
 * used when no peer negotiation applies (e.g. ad-hoc / self-started
 * networks).  Mirrors the Reg -> Current copies done elsewhere.
 */
void HTUseDefaultSetting(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	if (pHTInfo->bEnableHT) {
		pHTInfo->bCurrentHTSupport = true;
		pHTInfo->bCurSuppCCK = pHTInfo->bRegSuppCCK;

		pHTInfo->bCurBW40MHz = pHTInfo->bRegBW40MHz;
		pHTInfo->bCurShortGI20MHz = pHTInfo->bRegShortGI20MHz;
		pHTInfo->bCurShortGI40MHz = pHTInfo->bRegShortGI40MHz;

		if (ieee->iw_mode == IW_MODE_ADHOC)
			ieee->current_network.qos_data.active =
				 ieee->current_network.qos_data.supported;
		pHTInfo->bCurrent_AMSDU_Support = pHTInfo->bAMSDU_Support;
		pHTInfo->nCurrent_AMSDU_MaxSize = pHTInfo->nAMSDU_MaxSize;

		pHTInfo->bCurrentAMPDUEnable = pHTInfo->bAMPDUEnable;
		pHTInfo->CurrentAMPDUFactor = pHTInfo->AMPDU_Factor;
		/* Fix: was a no-op self-assignment
		 * (CurrentMPDUDensity = CurrentMPDUDensity); copy the
		 * configured default like HTInitializeHTInfo() does.
		 */
		pHTInfo->CurrentMPDUDensity = pHTInfo->MPDU_Density;

		HTFilterMCSRate(ieee, ieee->Regdot11TxHTOperationalRateSet,
				ieee->dot11HTOperationalRateSet);
		ieee->HTHighestOperaRate = HTGetHighestMCSRate(ieee,
					   ieee->dot11HTOperationalRateSet,
					   MCS_FILTER_ALL);
		ieee->HTCurrentOperaRate = ieee->HTHighestOperaRate;

	} else {
		pHTInfo->bCurrentHTSupport = false;
	}
}
/*
 * Return true if @pFrame is a QoS data frame with the Order bit set
 * (i.e. it carries an HT Control field) while HT is active.
 *
 * NOTE(review): the parenthesization compares (a && b) == 1 instead of
 * the more natural a && b; since && always yields 0 or 1 the behavior
 * is the same, but the intent reads oddly -- worth tidying upstream.
 */
u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame)
{
	if (ieee->pHTInfo->bCurrentHTSupport) {
		if ((IsQoSDataFrame(pFrame) && Frame_Order(pFrame)) == 1) {
			netdev_dbg(ieee->dev, "HT CONTROL FILED EXIST!!\n");
			return true;
		}
	}
	return false;
}
/*
 * Apply the bandwidth decision made in HTSetConnectBwMode(): tune to
 * the centre channel for the chosen 20/40 MHz configuration, program
 * the hardware bandwidth, and clear the in-progress flag.
 */
static void HTSetConnectBwModeCallback(struct rtllib_device *ieee)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	netdev_vdbg(ieee->dev, "%s()\n", __func__);
	if (pHTInfo->bCurBW40MHz) {
		/* in 40 MHz the centre is offset +/-2 channels from the
		 * primary, depending on where the extension channel sits
		 */
		if (pHTInfo->CurSTAExtChnlOffset == HT_EXTCHNL_OFFSET_UPPER)
			ieee->set_chan(ieee->dev,
				       ieee->current_network.channel + 2);
		else if (pHTInfo->CurSTAExtChnlOffset ==
			 HT_EXTCHNL_OFFSET_LOWER)
			ieee->set_chan(ieee->dev,
				       ieee->current_network.channel - 2);
		else
			ieee->set_chan(ieee->dev,
				       ieee->current_network.channel);

		ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20_40,
				       pHTInfo->CurSTAExtChnlOffset);
	} else {
		ieee->set_chan(ieee->dev, ieee->current_network.channel);
		ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20,
				       HT_EXTCHNL_OFFSET_NO_EXT);
	}

	pHTInfo->bSwBwInProgress = false;
}
/*
 * Validate and record the requested channel bandwidth / extension
 * channel offset, then apply it via HTSetConnectBwModeCallback().
 *
 * 40 MHz is refused when disabled in the registry, forced down to
 * 20 MHz for half-N APs, and degraded to 20 MHz when the extension
 * offset is invalid (e.g. lower extension on channel 1).
 */
void HTSetConnectBwMode(struct rtllib_device *ieee,
			enum ht_channel_width Bandwidth,
			enum ht_extchnl_offset Offset)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	if (pHTInfo->bRegBW40MHz == false)
		return;

	if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
		Bandwidth = HT_CHANNEL_WIDTH_20;

	/* a previous switch is still being applied; don't re-enter */
	if (pHTInfo->bSwBwInProgress) {
		pr_info("%s: bSwBwInProgress!!\n", __func__);
		return;
	}
	if (Bandwidth == HT_CHANNEL_WIDTH_20_40) {
		/* channel 1 cannot have a lower extension channel */
		if (ieee->current_network.channel < 2 &&
		    Offset == HT_EXTCHNL_OFFSET_LOWER)
			Offset = HT_EXTCHNL_OFFSET_NO_EXT;
		if (Offset == HT_EXTCHNL_OFFSET_UPPER ||
		    Offset == HT_EXTCHNL_OFFSET_LOWER) {
			pHTInfo->bCurBW40MHz = true;
			pHTInfo->CurSTAExtChnlOffset = Offset;
		} else {
			pHTInfo->bCurBW40MHz = false;
			pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
		}
	} else {
		pHTInfo->bCurBW40MHz = false;
		pHTInfo->CurSTAExtChnlOffset = HT_EXTCHNL_OFFSET_NO_EXT;
	}

	pr_info("%s():pHTInfo->bCurBW40MHz:%x\n", __func__,
	       pHTInfo->bCurBW40MHz);

	pHTInfo->bSwBwInProgress = true;

	HTSetConnectBwModeCallback(ieee);
}
| gpl-2.0 |
wilesduan/linux-part1 | drivers/staging/lustre/lustre/libcfs/linux/linux-module.c | 410 | 4528 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/libcfs/libcfs.h>
#define LNET_MINOR 240
/*
 * Copy an ioctl payload from user space into the kernel buffer
 * [buf, end) and validate it.
 *
 * The header is copied first to learn the version and total length;
 * the full payload is then copied and checked for well-formedness.
 * On success the inline-buffer pointers inside the payload are fixed
 * up to point into the copied bulk area.
 *
 * Returns 0 on success, a copy_from_user remainder or -EINVAL on error.
 */
int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
{
	struct libcfs_ioctl_hdr   *hdr;
	struct libcfs_ioctl_data  *data;
	int err;

	hdr = (struct libcfs_ioctl_hdr *)buf;
	data = (struct libcfs_ioctl_data *)buf;

	err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
	if (err)
		return err;

	if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
		CERROR("PORTALS: version mismatch kernel vs application\n");
		return -EINVAL;
	}

	/* reject payloads larger than the kernel-side buffer
	 * NOTE(review): `hdr->ioc_len + buf` could overflow for huge
	 * ioc_len values -- worth confirming ioc_len's type/range.
	 */
	if (hdr->ioc_len + buf >= end) {
		CERROR("PORTALS: user buffer exceeds kernel buffer\n");
		return -EINVAL;
	}

	if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) {
		CERROR("PORTALS: user buffer too small for ioctl\n");
		return -EINVAL;
	}

	err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
	if (err)
		return err;

	if (libcfs_ioctl_is_invalid(data)) {
		CERROR("PORTALS: ioctl not correctly formatted\n");
		return -EINVAL;
	}

	/* rebase the inline-buffer pointers into the copied bulk data */
	if (data->ioc_inllen1)
		data->ioc_inlbuf1 = &data->ioc_bulk[0];

	if (data->ioc_inllen2)
		data->ioc_inlbuf2 = &data->ioc_bulk[0] +
			cfs_size_round(data->ioc_inllen1);

	return 0;
}
/* Push @size bytes of reply data back to the userspace buffer @arg. */
int libcfs_ioctl_popdata(void *arg, void *data, int size)
{
	int rc = 0;

	if (copy_to_user((char *)arg, data, size))
		rc = -EFAULT;

	return rc;
}
extern struct cfs_psdev_ops libcfs_psdev_ops;
/*
 * Open handler for the /dev/lnet misc device.
 *
 * Delegates per-open state creation to the registered psdev ops; the
 * userstate pointer is stored in file->private_data for later calls.
 * Returns -EINVAL without an inode, -EPERM if no p_open hook is
 * registered, otherwise whatever p_open returns.
 */
static int
libcfs_psdev_open(struct inode * inode, struct file * file)
{
	struct libcfs_device_userstate **pdu = NULL;
	int    rc = 0;

	if (!inode)
		return (-EINVAL);
	/* p_open fills *pdu, i.e. file->private_data, on success. */
	pdu = (struct libcfs_device_userstate **)&file->private_data;
	if (libcfs_psdev_ops.p_open != NULL)
		rc = libcfs_psdev_ops.p_open(0, (void *)pdu);
	else
		return (-EPERM);
	return rc;
}
/*
 * Release handler for /dev/lnet: invoked on the final close of a file
 * descriptor, tears down the per-open userstate via the psdev ops.
 */
static int
libcfs_psdev_release(struct inode * inode, struct file * file)
{
	struct libcfs_device_userstate *pdu;

	if (inode == NULL)
		return -EINVAL;

	pdu = file->private_data;
	if (libcfs_psdev_ops.p_close == NULL)
		return -EPERM;

	return libcfs_psdev_ops.p_close(0, (void *)pdu);
}
/*
 * unlocked_ioctl entry point for /dev/lnet.
 *
 * Requires CAP_SYS_ADMIN, validates the ioctl number range, handles the
 * few platform-level requests (panic / memhog) inline, and forwards
 * everything else to the registered psdev p_ioctl hook.
 */
static long libcfs_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct cfs_psdev_file	 pfile;
	int    rc = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if ( _IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
	     _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR  ||
	     _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR ) {
		CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n",
		       _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
		return (-EINVAL);
	}

	/* Handle platform-dependent IOC requests */
	switch (cmd) {
	case IOC_LIBCFS_PANIC:
		if (!cfs_capable(CFS_CAP_SYS_BOOT))
			return (-EPERM);
		panic("debugctl-invoked panic");
		return (0);
	case IOC_LIBCFS_MEMHOG:
		if (!cfs_capable(CFS_CAP_SYS_ADMIN))
			return -EPERM;
		/* fall through: MEMHOG is serviced by p_ioctl below */
	}

	pfile.off = 0;
	pfile.private_data = file->private_data;
	if (libcfs_psdev_ops.p_ioctl != NULL)
		rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void *)arg);
	else
		rc = -EPERM;
	return (rc);
}
/* File operations for /dev/lnet: ioctl-driven only, no read/write. */
static struct file_operations libcfs_fops = {
	.unlocked_ioctl = libcfs_ioctl,
	.open		= libcfs_psdev_open,
	.release	= libcfs_psdev_release,
};
/* The /dev/lnet misc device, registered at the fixed minor LNET_MINOR. */
struct miscdevice libcfs_dev = {
	.minor = LNET_MINOR,
	.name = "lnet",
	.fops = &libcfs_fops,
};
| gpl-2.0 |
jeffegg/beaglebonepsp | arch/mips/kernel/smp-cmp.c | 666 | 5324 | /*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Chris Dearman (chris@mips.com)
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/amon.h>
#include <asm/gic.h>
/* Fire the "call function" IPI for @cpu through the GIC. */
static void ipi_call_function(unsigned int cpu)
{
	unsigned int intr;

	pr_debug("CPU%d: %s cpu %d status %08x\n",
		 smp_processor_id(), __func__, cpu, read_c0_status());

	intr = plat_ipi_call_int_xlate(cpu);
	gic_send_ipi(intr);
}
/* Fire the reschedule IPI for @cpu through the GIC. */
static void ipi_resched(unsigned int cpu)
{
	unsigned int intr;

	pr_debug("CPU%d: %s cpu %d status %08x\n",
		 smp_processor_id(), __func__, cpu, read_c0_status());

	intr = plat_ipi_resched_int_xlate(cpu);
	gic_send_ipi(intr);
}
/*
 * FIXME: This isn't restricted to CMP
 * The SMVP kernel could use GIC interrupts if available
 */
/*
 * Send a single IPI of type @action to @cpu with interrupts disabled
 * around the GIC write.  Unknown actions are silently ignored.
 */
void cmp_send_ipi_single(int cpu, unsigned int action)
{
	unsigned long flags;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		ipi_call_function(cpu);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		ipi_resched(cpu);
		break;
	}

	local_irq_restore(flags);
}
/* Deliver @action to every CPU set in @mask, one IPI at a time. */
static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		cmp_send_ipi_single(cpu, action);
}
static void cmp_init_secondary(void)
{
struct cpuinfo_mips *c = ¤t_cpu_data;
/* Assume GIC is present */
change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
STATUSF_IP7);
/* Enable per-cpu interrupts: platform specific */
c->core = (read_c0_ebase() >> 1) & 0xff;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
#endif
}
/*
 * Final per-CPU bring-up step: arm the count/compare timer, optionally
 * join the FPU-affinity mask, then enable local interrupts.
 */
static void cmp_smp_finish(void)
{
	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);

	/* CDFIXME: remove this? */
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
static void cmp_cpus_done(void)
{
pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it running
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 */
static void cmp_boot_secondary(int cpu, struct task_struct *idle)
{
	/* gp register doubles as the thread_info pointer on MIPS. */
	struct thread_info *gp = task_thread_info(idle);
	unsigned long sp = __KSTK_TOS(idle);
	unsigned long pc = (unsigned long)&smp_bootstrap;
	unsigned long a0 = 0;

	pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
		__func__, cpu);

#if 0
	/* Needed? */
	flush_icache_range((unsigned long)gp,
			   (unsigned long)(gp + sizeof(struct thread_info)));
#endif

	/* Hand off to the boot monitor, which releases the target CPU. */
	amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
}
/*
 * Common setup before any secondaries are started
 */
/*
 * Probe the boot monitor for available secondary CPUs, build the
 * logical<->physical CPU maps, and record sibling count when MT is
 * present.
 */
void __init cmp_smp_setup(void)
{
	int i;
	int ncpu = 0;

	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	/* CPU 0 is the boot CPU; ask amon about every other slot. */
	for (i = 1; i < NR_CPUS; i++) {
		if (amon_cpu_avail(i)) {
			set_cpu_possible(i, true);
			__cpu_number_map[i]	= ++ncpu;
			__cpu_logical_map[ncpu]	= i;
		}
	}

	if (cpu_has_mipsmt) {
		unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();

		/* PTC field is (number of TCs - 1). */
		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
		smp_num_siblings = nvpe;
	}
	pr_info("Detected %i available secondary CPU(s)\n", ncpu);
}
/* Pre-SMP hook: apply MT CPU option configuration before boot-up. */
void __init cmp_prepare_cpus(unsigned int max_cpus)
{
	pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
		 smp_processor_id(), __func__, max_cpus);

	/*
	 * FIXME: some of these options are per-system, some per-core and
	 * some per-cpu
	 */
	mips_mt_set_cpuoptions();
}
/* Platform SMP ops vector wiring the CMP callbacks into the MIPS core. */
struct plat_smp_ops cmp_smp_ops = {
	.send_ipi_single	= cmp_send_ipi_single,
	.send_ipi_mask		= cmp_send_ipi_mask,
	.init_secondary		= cmp_init_secondary,
	.smp_finish		= cmp_smp_finish,
	.cpus_done		= cmp_cpus_done,
	.boot_secondary		= cmp_boot_secondary,
	.smp_setup		= cmp_smp_setup,
	.prepare_cpus		= cmp_prepare_cpus,
};
| gpl-2.0 |
XMelancholy/android_kernel_huawei_h60 | fs/jfs/super.c | 1690 | 23588 | /*
* Copyright (C) International Business Machines Corp., 2000-2004
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");
static struct kmem_cache * jfs_inode_cachep;
static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;
#define MAX_COMMIT_THREADS 64
static int commit_threads = 0;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");
static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;
#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif
/*
 * Apply the mount-time error policy after a filesystem error: mark the
 * superblock FM_DIRTY, then panic, remount read-only, or continue
 * depending on the errors= mount option.  No-op on read-only mounts.
 */
static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
			sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem "
			"as read-only\n",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}
/*
 * jfs_error - report a filesystem error and apply the mount error policy
 * @sb:  superblock of the affected filesystem
 * @fmt: printf-style format string describing the error, followed by its
 *       arguments
 *
 * Formats and logs the message, then lets jfs_handle_error() mark the
 * superblock dirty and panic/remount-ro/continue per the mount flags.
 *
 * The second parameter was previously named "function", which was
 * misleading: it is consumed by vsnprintf() as a format string.  Renamed
 * to @fmt (a parameter rename is ABI-neutral in C).
 *
 * NOTE(review): error_buf is static and unlocked, so concurrent callers
 * may interleave output — pre-existing behavior, deliberately unchanged.
 */
void jfs_error(struct super_block *sb, const char *fmt, ...)
{
	static char error_buf[256];
	va_list args;

	va_start(args, fmt);
	vsnprintf(error_buf, sizeof(error_buf), fmt, args);
	va_end(args);

	pr_err("ERROR: (device %s): %s\n", sb->s_id, error_buf);

	jfs_handle_error(sb);
}
/*
 * Allocate a JFS in-core inode from the slab cache and return the
 * embedded VFS inode to the caller, or NULL on allocation failure.
 */
static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *ji;

	ji = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	return ji ? &ji->vfs_inode : NULL;
}
static void jfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
struct jfs_inode_info *ji = JFS_IP(inode);
kmem_cache_free(jfs_inode_cachep, ji);
}
/*
 * Tear down a JFS in-core inode: drop its reference on the active
 * allocation group (if any) and schedule the slab free after an RCU
 * grace period.
 */
static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	/* ag_lock guards active_ag against concurrent allocator updates. */
	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);

	call_rcu(&inode->i_rcu, jfs_i_callback);
}
/*
 * statfs handler: report block usage from the block map and an estimated
 * inode count (JFS allocates inodes dynamically, so the "total" is a
 * guess based on free extents).
 */
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	/* Existing inodes plus what free space could still hold, capped
	 * at 32 bits. */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));
	/* fsid: CRC of the two halves of the on-disk uuid. */
	buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
	buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
					sizeof(sbi->uuid)/2);

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}
/*
 * Unmount handler: disable quotas, unmount the journal/metadata, drop the
 * NLS table and the direct-mapping inode, then free the sb-private info.
 */
static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");

	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);

	unload_nls(sbi->nls_tab);

	/* Flush cached metadata pages before releasing the direct inode. */
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);

	kfree(sbi);
}
/* Token ids for mount-option parsing; paired with the tokens[] table. */
enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
	Opt_discard, Opt_nodiscard, Opt_discard_minblk
};
/* match_token() patterns for the supported mount options. */
static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_discard_minblk, "discard=%u"},
	{Opt_err, NULL}
};
/*
 * Parse the comma-separated mount option string into *flag bits, a
 * possible resize target (*newLVSize), and sbi fields (uid/gid/umask,
 * NLS table, trim threshold).
 *
 * Returns 1 on success, 0 on any parse error (after releasing a
 * partially-loaded NLS table).  Used by both mount and remount.
 */
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	/* nls_map: (void *)-1 = leave unchanged; NULL = iocharset=none. */
	void *nls_map = (void *)-1;	/* -1: no change;  NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			/* A later iocharset= overrides an earlier one. */
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					pr_err("JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			*newLVSize = simple_strtoull(resize, &resize, 0);
			break;
		}
		case Opt_resize_nosize:
		{
			/* "resize" with no value: grow to the device size. */
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				pr_err("JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				pr_err("JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			pr_err("JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			uid_t val = simple_strtoul(uid, &uid, 0);
			sbi->uid = make_kuid(current_user_ns(), val);
			if (!uid_valid(sbi->uid))
				goto cleanup;
			break;
		}

		case Opt_gid:
		{
			char *gid = args[0].from;
			gid_t val = simple_strtoul(gid, &gid, 0);
			sbi->gid = make_kgid(current_user_ns(), val);
			if (!gid_valid(sbi->gid))
				goto cleanup;
			break;
		}

		case Opt_umask:
		{
			char *umask = args[0].from;
			sbi->umask = simple_strtoul(umask, &umask, 8);
			if (sbi->umask & ~0777) {
				pr_err("JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}

		case Opt_discard:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			/* if set to 1, even copying files will cause
			 * trimming :O
			 * -> user has more control over the online trimming
			 */
			sbi->minblks_trim = 64;
			if (blk_queue_discard(q)) {
				*flag |= JFS_DISCARD;
			} else {
				pr_err("JFS: discard option " \
					"not supported on device\n");
			}
			break;
		}

		case Opt_nodiscard:
			*flag &= ~JFS_DISCARD;
			break;

		case Opt_discard_minblk:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			char *minblks_trim = args[0].from;
			if (blk_queue_discard(q)) {
				*flag |= JFS_DISCARD;
				sbi->minblks_trim = simple_strtoull(
					minblks_trim, &minblks_trim, 0);
			} else {
				pr_err("JFS: discard option " \
					"not supported on device\n");
			}
			break;
		}

		default:
			printk("jfs: Unrecognized mount option \"%s\" "
					" or missing value\n", p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}
/*
 * remount_fs handler: re-parse options, optionally grow the volume, and
 * handle ro<->rw transitions (invalidating stale metadata when going rw,
 * suspending quotas when going ro).  A nointegrity toggle on a rw mount
 * forces an internal umount/mount cycle so journaling state matches.
 */
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;
	int ret;

	if (!parse_options(data, sb, &newLVSize, &flag)) {
		return -EINVAL;
	}

	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			pr_err("JFS: resize requires volume" \
				" to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		ret = jfs_mount_rw(sb, 1);

		/* mark the fs r/w for quota activity */
		sb->s_flags &= ~MS_RDONLY;
		dquot_resume(sb, -1);
		return ret;
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = dquot_suspend(sb, -1);
		if (rc < 0) {
			return rc;
		}
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;

			JFS_SBI(sb)->flag = flag;
			ret = jfs_mount_rw(sb, 1);
			return ret;
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}
/*
 * Fill a superblock at mount time: allocate sb-private info, parse
 * options, create the direct-mapping metadata inode, mount the
 * filesystem (and journal unless read-only), and instantiate the root
 * dentry.  Unwinds everything on each failure path.
 */
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	if (!new_valid_dev(sb->s_bdev->bd_dev))
		return -EOVERFLOW;

	sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sb->s_max_links = JFS_LINK_MAX;
	sbi->sb = sb;
	/* "Unset" defaults; parse_options() may override them. */
	sbi->uid = INVALID_UID;
	sbi->gid = INVALID_GID;
	sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag))
		goto out_kfree;
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		pr_err("resize option for remount only\n");
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;
#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
#endif

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_unload;
	}
	inode->i_ino = 0;
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	insert_inode_hash(inode);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent) {
			jfs_err("jfs_mount failed w/return code = %d", rc);
		}
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;	/* log == NULL signals read-only */
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	if (sbi->mntflag & JFS_OS2)
		sb->s_d_op = &jfs_ci_dentry_operations;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes);
#endif
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");

out_no_rw:
	rc = jfs_umount(sb);
	if (rc) {
		jfs_err("jfs_umount failed with return code %d", rc);
	}
out_mount_failed:
	/* Write back and discard cached metadata before dropping the
	 * direct inode. */
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_unload:
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
out_kfree:
	kfree(sbi);
	return ret;
}
/*
 * freeze_fs handler: quiesce transactions, shut down the log, and mark
 * the superblock clean so the frozen image is consistent.  Read-only
 * mounts need no work.
 */
static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		rc = lmLogShutdown(log);
		if (rc) {
			jfs_error(sb, "jfs_freeze: lmLogShutdown failed");

			/* let operations fail rather than hang */
			txResume(sb);

			return rc;
		}
		rc = updateSuper(sb, FM_CLEAN);
		if (rc) {
			jfs_err("jfs_freeze: updateSuper failed\n");
			/*
			 * Don't fail here. Everything succeeded except
			 * marking the superblock clean, so there's really
			 * no harm in leaving it frozen for now.
			 */
		}
	}
	return 0;
}
/*
 * unfreeze_fs handler: mark the superblock mounted again, restart the
 * log, and resume transaction processing.  txResume() runs even if the
 * log restart fails so the filesystem is not left wedged.
 */
static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		rc = updateSuper(sb, FM_MOUNT);
		if (rc) {
			jfs_error(sb, "jfs_unfreeze: updateSuper failed");
			goto out;
		}
		rc = lmLogInit(log);
		if (rc)
			jfs_error(sb, "jfs_unfreeze: lmLogInit failed");
out:
		txResume(sb);
	}
	return rc;
}
/* Mount entry point: JFS lives on a block device, so use mount_bdev(). */
static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}
/*
 * sync_fs handler: push dirty quota structures and flush the journal.
 * @wait selects synchronous vs. best-effort journal flushing.
 */
static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		/*
		 * Write quota structures to quota file, sync_blockdev() will
		 * write them to disk later
		 */
		dquot_writeback_dquots(sb, -1);
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}
	return 0;
}
/*
 * show_options handler: emit only the mount options that differ from
 * their defaults, mirroring what parse_options() accepts.
 */
static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);

	if (uid_valid(sbi->uid))
		seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
	if (gid_valid(sbi->gid))
		seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->flag & JFS_DISCARD)
		seq_printf(seq, ",discard=%u", sbi->minblks_trim);
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif
	return 0;
}
#ifdef CONFIG_QUOTA
/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
/*
 * Block-by-block read of @len bytes at offset @off from the quota file
 * of @type, clamped to i_size.  Holes read back as zeros.  Returns the
 * number of bytes read or a negative errno.
 */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		/* Copy at most to the end of the current block. */
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
/* Write to quotafile */
/*
 * Block-by-block write of @len bytes at offset @off to the quota file of
 * @type, allocating blocks as needed and dirtying buffers directly
 * (bypassing the pagecache, same rationale as jfs_quota_read).  Extends
 * i_size and bumps mtime/ctime on success.  Returns bytes written or a
 * negative errno if nothing was written.
 */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock(&inode->i_mutex);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		/* Partial-block writes must read the block first. */
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	/* Nothing written at all: report the error. */
	if (len == towrite) {
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}
#endif
/* VFS super_operations vector for JFS. */
static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.evict_inode	= jfs_evict_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.freeze_fs	= jfs_freeze,
	.unfreeze_fs	= jfs_unfreeze,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
#endif
};
/* NFS export operations (file-handle <-> dentry conversion). */
static const struct export_operations jfs_export_operations = {
	.fh_to_dentry	= jfs_fh_to_dentry,
	.fh_to_parent	= jfs_fh_to_parent,
	.get_parent	= jfs_get_parent,
};
/* Filesystem type registration; JFS requires a backing block device. */
static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.mount		= jfs_do_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("jfs");
/*
 * Slab constructor for jfs_inode_info objects: zero the structure and
 * initialize its lists, locks, and the embedded VFS inode exactly once
 * per slab object.
 */
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
	inode_init_once(&jfs_ip->vfs_inode);
}
/*
 * Module init: create the inode slab, bring up the metapage and
 * transaction subsystems, spawn the I/O, commit, and sync kernel
 * threads, then register the filesystem.  Each failure path unwinds
 * everything started before it, in reverse order.
 */
static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			    init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	/* Default: one commit thread per online CPU, capped at the max. */
	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			/* Only stop the threads actually started. */
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	rc = register_filesystem(&jfs_fs_type);
	if (!rc)
		return 0;

#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	kthread_stop(jfsSyncThread);
kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}
/*
 * Module exit: tear down subsystems and kernel threads, unregister the
 * filesystem, and destroy the inode slab after an RCU barrier so any
 * pending jfs_i_callback() frees have completed.
 */
static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(jfs_inode_cachep);
}
module_init(init_jfs_fs)
module_exit(exit_jfs_fs)
| gpl-2.0 |
sandymanu/sandy_oneplus2_msm8994 | fs/ocfs2/dlmfs/dlmfs.c | 2202 | 17204 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* dlmfs.c
*
* Code which implements the kernel side of a minimal userspace
* interface to our DLM. This file handles the virtual file system
* used for communication with userspace. Credit should go to ramfs,
* which was a template for the fs side of this module.
*
* Copyright (C) 2003, 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
/* Simple VFS hooks based on: */
/*
* Resizable simple ram filesystem for Linux.
*
* Copyright (C) 2000 Linus Torvalds.
* 2000 Transmeta Corp.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include "stackglue.h"
#include "userdlm.h"
#include "dlmfsver.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations;
static const struct inode_operations dlmfs_dir_inode_operations;
static const struct inode_operations dlmfs_root_inode_operations;
static const struct inode_operations dlmfs_file_inode_operations;
static struct kmem_cache *dlmfs_inode_cache;
struct workqueue_struct *user_dlm_worker;
/*
* These are the ABI capabilities of dlmfs.
*
* Over time, dlmfs has added some features that were not part of the
* initial ABI. Unfortunately, some of these features are not detectable
* via standard usage. For example, Linux's default poll always returns
* POLLIN, so there is no way for a caller of poll(2) to know when dlmfs
* added poll support. Instead, we provide this list of new capabilities.
*
* Capabilities is a read-only attribute. We do it as a module parameter
* so we can discover it whether dlmfs is built in, loaded, or even not
* loaded.
*
* The ABI features are local to this machine's dlmfs mount. This is
* distinct from the locking protocol, which is concerned with inter-node
* interaction.
*
* Capabilities:
* - bast : POLLIN against the file descriptor of a held lock
* signifies a bast fired on the lock.
*/
#define DLMFS_CAPABILITIES "bast stackglue"
/* Setter for the read-only "capabilities" module parameter: always
 * rejects the write with -EINVAL and logs the attempt. */
static int param_set_dlmfs_capabilities(const char *val,
					struct kernel_param *kp)
{
	printk(KERN_ERR "%s: readonly parameter\n", kp->name);
	return -EINVAL;
}
static int param_get_dlmfs_capabilities(char *buffer,
struct kernel_param *kp)
{
return strlcpy(buffer, DLMFS_CAPABILITIES,
strlen(DLMFS_CAPABILITIES) + 1);
}
/* Expose "capabilities" as a world-readable (0444) module parameter so
 * userspace can discover dlmfs ABI features even when built-in. */
module_param_call(capabilities, param_set_dlmfs_capabilities,
		  param_get_dlmfs_capabilities, NULL, 0444);
MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES);
/*
 * Translate open(2) flags into a dlm lock level and a set of lock flags.
 *
 * Mapping:
 *   O_RDONLY          -> PRMODE (shared) level
 *   O_WRONLY / O_RDWR -> EXMODE (exclusive) level
 *   O_NONBLOCK        -> NOQUEUE (trylock)
 *
 * Returns < 0 for invalid flag combinations (none exist today, so this
 * currently always returns 0).
 */
static int dlmfs_decode_open_flags(int open_flags,
				   int *level,
				   int *flags)
{
	int lkflags = 0;

	*level = (open_flags & (O_WRONLY|O_RDWR)) ? DLM_LOCK_EX : DLM_LOCK_PR;

	if (open_flags & O_NONBLOCK)
		lkflags |= DLM_LKF_NOQUEUE;
	*flags = lkflags;

	return 0;
}
/*
 * Open a lock file: decode the open flags into a dlm lock level, take
 * the cluster lock at that level, and stash the level in per-open state
 * (freed again in dlmfs_file_release).  Opening the file IS the lock
 * acquisition in the dlmfs model.
 */
static int dlmfs_file_open(struct inode *inode,
			   struct file *file)
{
	int status, level, flags;
	struct dlmfs_filp_private *fp = NULL;
	struct dlmfs_inode_private *ip;
	/* only regular files are ever opened through this path */
	if (S_ISDIR(inode->i_mode))
		BUG();
	mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino,
		file->f_flags);
	status = dlmfs_decode_open_flags(file->f_flags, &level, &flags);
	if (status < 0)
		goto bail;
	/* We don't want to honor O_APPEND at read/write time as it
	 * doesn't make sense for LVB writes. */
	file->f_flags &= ~O_APPEND;
	fp = kmalloc(sizeof(*fp), GFP_NOFS);
	if (!fp) {
		status = -ENOMEM;
		goto bail;
	}
	fp->fp_lock_level = level;
	ip = DLMFS_I(inode);
	status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags);
	if (status < 0) {
		/* -ETXTBSY is a strange error to return here, but it
		 * lets userspace distinguish a valid lock request that
		 * simply couldn't be granted (trylock contention) from
		 * a genuinely invalid one. */
		if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN)
			status = -ETXTBSY;
		kfree(fp);
		goto bail;
	}
	file->private_data = fp;
bail:
	return status;
}
/*
 * Release a lock file: drop the cluster lock taken at open time and
 * free the per-open state.
 *
 * @file->private_data may be NULL when dlmfs_file_open() failed before
 * installing it; in that case there is nothing to unlock.
 *
 * Always returns 0 (the original kept a `status` local that was set to
 * 0 and never used; the dead store has been removed).
 */
static int dlmfs_file_release(struct inode *inode,
			      struct file *file)
{
	int level;
	struct dlmfs_inode_private *ip = DLMFS_I(inode);
	struct dlmfs_filp_private *fp = file->private_data;

	if (S_ISDIR(inode->i_mode))
		BUG();

	mlog(0, "close called on inode %lu\n", inode->i_ino);

	if (fp) {
		level = fp->fp_lock_level;
		/* DLM_LOCK_IV means the lock was never granted */
		if (level != DLM_LOCK_IV)
			user_dlm_cluster_unlock(&ip->ip_lockres, level);
		kfree(fp);
		file->private_data = NULL;
	}

	return 0;
}
/*
 * We implement ->setattr() only so that size changes can be suppressed:
 * the file size is always the size of the LVB and nothing else.
 */
static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	/* silently drop any requested size change */
	attr->ia_valid &= ~ATTR_SIZE;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
/*
 * poll(2) on a lock file: report POLLIN when a bast has fired on the
 * held lock (USER_LOCK_BLOCKED set), i.e. another node wants the lock.
 * This is the "bast" capability advertised via the module parameter.
 */
static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
{
	struct dlmfs_inode_private *ip = DLMFS_I(file_inode(file));
	unsigned int mask = 0;

	poll_wait(file, &ip->ip_lockres.l_event, wait);

	spin_lock(&ip->ip_lockres.l_lock);
	if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED)
		mask = POLLIN | POLLRDNORM;
	spin_unlock(&ip->ip_lockres.l_lock);

	return mask;
}
/*
 * Read from a lock file: copies bytes of the lock value block (LVB)
 * into the user buffer.  Reads are bounded by i_size (== DLM_LVB_LEN).
 * Returns the number of bytes actually copied, 0 at/after EOF, or a
 * negative errno.
 */
static ssize_t dlmfs_file_read(struct file *filp,
			       char __user *buf,
			       size_t count,
			       loff_t *ppos)
{
	int bytes_left;
	ssize_t readlen, got;
	char *lvb_buf;
	struct inode *inode = file_inode(filp);
	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
		inode->i_ino, count, *ppos);
	if (*ppos >= i_size_read(inode))
		return 0;
	if (!count)
		return 0;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	/* don't read past the lvb */
	if ((count + *ppos) > i_size_read(inode))
		readlen = i_size_read(inode) - *ppos;
	else
		readlen = count;
	lvb_buf = kmalloc(readlen, GFP_NOFS);
	if (!lvb_buf)
		return -ENOMEM;
	got = user_dlm_read_lvb(inode, lvb_buf, readlen);
	if (got) {
		/* user_dlm_read_lvb returns all-or-nothing */
		BUG_ON(got != readlen);
		/* on a partial fault, report the bytes that made it */
		bytes_left = __copy_to_user(buf, lvb_buf, readlen);
		readlen -= bytes_left;
	} else
		readlen = 0;
	kfree(lvb_buf);
	*ppos = *ppos + readlen;
	mlog(0, "read %zd bytes\n", readlen);
	return readlen;
}
/*
 * Write to a lock file: copies bytes from the user buffer into the lock
 * value block (LVB).  Writes are bounded by i_size (== DLM_LVB_LEN).
 * Returns the number of bytes written or a negative errno.
 *
 * Fix: the in-bounds branch computed `writelen = count - *ppos`, which
 * silently under-wrote whenever *ppos was nonzero (and made writelen
 * huge if *ppos > count, since both are unsigned).  When the request
 * fits inside the LVB the correct length is simply `count`; the
 * out-of-bounds branch already clamps to `i_size - *ppos`.
 */
static ssize_t dlmfs_file_write(struct file *filp,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int bytes_left;
	ssize_t writelen;
	char *lvb_buf;
	struct inode *inode = file_inode(filp);

	mlog(0, "inode %lu, count = %zu, *ppos = %llu\n",
		inode->i_ino, count, *ppos);

	if (*ppos >= i_size_read(inode))
		return -ENOSPC;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* don't write past the lvb */
	if ((count + *ppos) > i_size_read(inode))
		writelen = i_size_read(inode) - *ppos;
	else
		writelen = count;

	lvb_buf = kmalloc(writelen, GFP_NOFS);
	if (!lvb_buf)
		return -ENOMEM;

	/* on a partial fault, push only the bytes that made it across */
	bytes_left = copy_from_user(lvb_buf, buf, writelen);
	writelen -= bytes_left;
	if (writelen)
		user_dlm_write_lvb(inode, lvb_buf, writelen);

	kfree(lvb_buf);
	*ppos = *ppos + writelen;
	mlog(0, "wrote %zd bytes\n", writelen);
	return writelen;
}
/* Slab constructor: runs once per object when the slab page is first
 * populated; puts the embedded VFS inode and our fields into a known
 * clean state. */
static void dlmfs_init_once(void *foo)
{
	struct dlmfs_inode_private *ip = foo;

	ip->ip_conn = NULL;
	ip->ip_parent = NULL;

	inode_init_once(&ip->ip_vfs_inode);
}
/* super_operations.alloc_inode: carve a dlmfs inode out of our slab
 * cache and hand back the embedded VFS inode (NULL on allocation
 * failure). */
static struct inode *dlmfs_alloc_inode(struct super_block *sb)
{
	struct dlmfs_inode_private *ip =
		kmem_cache_alloc(dlmfs_inode_cache, GFP_NOFS);

	return ip ? &ip->ip_vfs_inode : NULL;
}
/* RCU callback: free the inode's containing dlmfs_inode_private once
 * all RCU readers are done with it. */
static void dlmfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode));
}
/* super_operations.destroy_inode: defer the actual free to RCU so that
 * lock-free path walkers never see a freed inode. */
static void dlmfs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, dlmfs_i_callback);
}
/*
 * super_operations.evict_inode: tear down the dlm state hanging off the
 * inode.  Regular files own a lock resource (and a reference on their
 * parent directory inode); directories own a cluster connection.
 */
static void dlmfs_evict_inode(struct inode *inode)
{
	int status;
	struct dlmfs_inode_private *ip;
	clear_inode(inode);
	mlog(0, "inode %lu\n", inode->i_ino);
	ip = DLMFS_I(inode);
	if (S_ISREG(inode->i_mode)) {
		status = user_dlm_destroy_lock(&ip->ip_lockres);
		if (status < 0)
			mlog_errno(status);
		/* drop the reference taken by igrab() in dlmfs_get_inode;
		 * doing this after lock destruction guarantees the lock
		 * is gone before the parent dir can be unregistered */
		iput(ip->ip_parent);
		goto clear_fields;
	}
	mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn);
	/* we must be a directory. If required, lets unregister the
	 * dlm context now. */
	if (ip->ip_conn)
		user_dlm_unregister(ip->ip_conn);
clear_fields:
	ip->ip_parent = NULL;
	ip->ip_conn = NULL;
}
/* Pseudo-filesystem BDI: no readahead, no dirty accounting and no
 * writeback — lock files have nothing to write back. */
static struct backing_dev_info dlmfs_backing_dev_info = {
	.name		= "ocfs2-dlmfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
/*
 * Allocate and initialize the root directory inode for a dlmfs mount.
 * Returns NULL on allocation failure (the caller feeds the result to
 * d_make_root(), which tolerates NULL).
 *
 * Cleanup: the original declared and assigned a `struct
 * dlmfs_inode_private *ip` that was never read; the dead local has been
 * removed.
 */
static struct inode *dlmfs_get_root_inode(struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode_init_owner(inode, NULL, S_IFDIR | 0755);
	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inc_nlink(inode);	/* directories start with i_nlink == 2 */
	inode->i_fop = &simple_dir_operations;
	inode->i_op = &dlmfs_root_inode_operations;

	return inode;
}
/*
 * Allocate and initialize a non-root dlmfs inode (a lock file or a
 * domain directory) under @parent.  The new inode inherits the parent's
 * cluster connection.  Returns NULL on allocation failure.
 */
static struct inode *dlmfs_get_inode(struct inode *parent,
				     struct dentry *dentry,
				     umode_t mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode * inode = new_inode(sb);
	struct dlmfs_inode_private *ip;
	if (!inode)
		return NULL;
	inode->i_ino = get_next_ino();
	inode_init_owner(inode, parent, mode);
	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	ip = DLMFS_I(inode);
	ip->ip_conn = DLMFS_I(parent)->ip_conn;
	switch (mode & S_IFMT) {
	default:
		/* for now we don't support anything other than
		 * directories and regular files. */
		BUG();
		break;
	case S_IFREG:
		inode->i_op = &dlmfs_file_inode_operations;
		inode->i_fop = &dlmfs_file_operations;
		/* a lock file's size is always the LVB size */
		i_size_write(inode, DLM_LVB_LEN);
		user_dlm_lock_res_init(&ip->ip_lockres, dentry);
		/* released at clear_inode time, this insures that we
		 * get to drop the dlm reference on each lock *before*
		 * we call the unregister code for releasing parent
		 * directories. */
		ip->ip_parent = igrab(parent);
		BUG_ON(!ip->ip_parent);
		break;
	case S_IFDIR:
		inode->i_op = &dlmfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink ==
		 * 2 (for "." entry) */
		inc_nlink(inode);
		break;
	}
	return inode;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
/* SMP-safe */
/*
 * mkdir in the dlmfs root creates a lock "domain": the directory name
 * is the domain name, and a cluster connection is registered for it.
 * Files created inside the directory become locks in that domain.
 */
static int dlmfs_mkdir(struct inode * dir,
		       struct dentry * dentry,
		       umode_t mode)
{
	int status;
	struct inode *inode = NULL;
	struct qstr *domain = &dentry->d_name;
	struct dlmfs_inode_private *ip;
	struct ocfs2_cluster_connection *conn;
	mlog(0, "mkdir %.*s\n", domain->len, domain->name);
	/* verify that we have a proper domain */
	if (domain->len >= GROUP_NAME_MAX) {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid domain name for directory.\n");
		goto bail;
	}
	inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR);
	if (!inode) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}
	ip = DLMFS_I(inode);
	conn = user_dlm_register(domain);
	if (IS_ERR(conn)) {
		status = PTR_ERR(conn);
		mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
		     status, domain->len, domain->name);
		goto bail;
	}
	ip->ip_conn = conn;
	inc_nlink(dir);
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	status = 0;
bail:
	/* on error the inode (if any) is dropped; evict_inode copes
	 * with ip_conn still being NULL */
	if (status < 0)
		iput(inode);
	return status;
}
/*
 * Create a lock file inside a domain directory.  The file name is the
 * lock name; names starting with '$' are reserved for the dlm itself.
 * The lock resource is set up by dlmfs_get_inode(); the actual cluster
 * lock is only taken at open(2) time.
 */
static int dlmfs_create(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			bool excl)
{
	int status = 0;
	struct inode *inode;
	struct qstr *name = &dentry->d_name;
	mlog(0, "create %.*s\n", name->len, name->name);
	/* verify name is valid and doesn't contain any dlm reserved
	 * characters */
	if (name->len >= USER_DLM_LOCK_ID_MAX_LEN ||
	    name->name[0] == '$') {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len,
		     name->name);
		goto bail;
	}
	inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG);
	if (!inode) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
bail:
	return status;
}
/*
 * Unlink a lock file.  Destroying the lock resource fails with -EBUSY
 * semantics if the lock is still held or waited on, in which case the
 * directory entry is left alone.
 */
static int dlmfs_unlink(struct inode *dir,
			struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int status;

	mlog(0, "unlink inode %lu\n", inode->i_ino);

	/* if there are no current holders, or none that are waiting
	 * to acquire a lock, this basically destroys our lockres. */
	status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres);
	if (status < 0) {
		mlog(ML_ERROR, "unlink %.*s, error %d from destroy\n",
		     dentry->d_name.len, dentry->d_name.name, status);
		return status;
	}

	return simple_unlink(dir, dentry);
}
/*
 * Fill in the superblock for a new dlmfs mount and create its root.
 * Returns 0 on success, -ENOMEM if the root inode/dentry could not be
 * allocated (d_make_root tolerates a NULL inode and frees on failure).
 */
static int dlmfs_fill_super(struct super_block * sb,
			    void * data,
			    int silent)
{
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = DLMFS_MAGIC;
	sb->s_op = &dlmfs_ops;
	sb->s_root = d_make_root(dlmfs_get_root_inode(sb));

	return sb->s_root ? 0 : -ENOMEM;
}
/* Lock files: open/release take and drop the cluster lock; read/write
 * access the LVB; poll reports basts. */
static const struct file_operations dlmfs_file_operations = {
	.open		= dlmfs_file_open,
	.release	= dlmfs_file_release,
	.poll		= dlmfs_file_poll,
	.read		= dlmfs_file_read,
	.write		= dlmfs_file_write,
	.llseek		= default_llseek,
};
/* Domain directories: may create/unlink lock files but not subdirs. */
static const struct inode_operations dlmfs_dir_inode_operations = {
	.create		= dlmfs_create,
	.lookup		= simple_lookup,
	.unlink		= dlmfs_unlink,
};
/* this way we can restrict mkdir to only the toplevel of the fs. */
static const struct inode_operations dlmfs_root_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= dlmfs_mkdir,
	.rmdir		= simple_rmdir,
};
static const struct super_operations dlmfs_ops = {
	.statfs		= simple_statfs,
	.alloc_inode	= dlmfs_alloc_inode,
	.destroy_inode	= dlmfs_destroy_inode,
	.evict_inode	= dlmfs_evict_inode,
	/* evict immediately on last iput — locks have no pagecache */
	.drop_inode	= generic_delete_inode,
};
/* Lock files override setattr so size changes are suppressed. */
static const struct inode_operations dlmfs_file_inode_operations = {
	.getattr	= simple_getattr,
	.setattr	= dlmfs_file_setattr,
};
/* Mount entry point: dlmfs has no backing device, so every mount gets
 * a fresh in-memory superblock via mount_nodev(). */
static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
}
/* Filesystem registration; kill_litter_super drops the pinned dentries
 * taken via dget() in dlmfs_create/dlmfs_mkdir at unmount time. */
static struct file_system_type dlmfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ocfs2_dlmfs",
	.mount		= dlmfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("ocfs2_dlmfs");
/*
 * Module init: set up the BDI, the inode slab cache and the userdlm
 * workqueue, then register the filesystem.  On any failure everything
 * already set up is torn down again.
 *
 * Fix: the success-path printk lacked a KERN_<level> prefix (a
 * checkpatch error); it now logs at KERN_INFO.
 */
static int __init init_dlmfs_fs(void)
{
	int status;
	int cleanup_inode = 0, cleanup_worker = 0;

	dlmfs_print_version();

	status = bdi_init(&dlmfs_backing_dev_info);
	if (status)
		return status;

	dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache",
				sizeof(struct dlmfs_inode_private),
				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
					SLAB_MEM_SPREAD),
				dlmfs_init_once);
	if (!dlmfs_inode_cache) {
		status = -ENOMEM;
		goto bail;
	}
	cleanup_inode = 1;

	user_dlm_worker = create_singlethread_workqueue("user_dlm");
	if (!user_dlm_worker) {
		status = -ENOMEM;
		goto bail;
	}
	cleanup_worker = 1;

	user_dlm_set_locking_protocol();
	status = register_filesystem(&dlmfs_fs_type);
bail:
	if (status) {
		/* unwind only what was actually set up */
		if (cleanup_inode)
			kmem_cache_destroy(dlmfs_inode_cache);
		if (cleanup_worker)
			destroy_workqueue(user_dlm_worker);
		bdi_destroy(&dlmfs_backing_dev_info);
	} else
		printk(KERN_INFO "OCFS2 User DLM kernel interface loaded\n");
	return status;
}
/*
 * Module exit: unregister the filesystem first so no new work can be
 * queued, drain and destroy the workqueue, then free the inode cache.
 */
static void __exit exit_dlmfs_fs(void)
{
	unregister_filesystem(&dlmfs_fs_type);
	flush_workqueue(user_dlm_worker);
	destroy_workqueue(user_dlm_worker);
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(dlmfs_inode_cache);
	bdi_destroy(&dlmfs_backing_dev_info);
}
MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");
module_init(init_dlmfs_fs)
module_exit(exit_dlmfs_fs)
| gpl-2.0 |
sebirdman/kernel-msm | fs/9p/acl.c | 2202 | 9076 | /*
* Copyright IBM Corporation, 2010
* Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/posix_acl_xattr.h>
#include "xattr.h"
#include "acl.h"
#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"
/*
 * Fetch the ACL xattr @name from the server for @fid and decode it.
 * Returns a referenced posix_acl, NULL if the attribute is absent or
 * unsupported, or an ERR_PTR.  A two-step fetch is used: first probe
 * for the size, then read the value.
 */
static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
{
	ssize_t size;
	void *value = NULL;
	struct posix_acl *acl = NULL;
	size = v9fs_fid_xattr_get(fid, name, NULL, 0);
	if (size > 0) {
		value = kzalloc(size, GFP_NOFS);
		if (!value)
			return ERR_PTR(-ENOMEM);
		size = v9fs_fid_xattr_get(fid, name, value, size);
		if (size > 0) {
			acl = posix_acl_from_xattr(&init_user_ns, value, size);
			if (IS_ERR(acl))
				goto err_out;
		}
	} else if (size == -ENODATA || size == 0 ||
		   size == -ENOSYS || size == -EOPNOTSUPP) {
		/* no ACL present (or server can't do xattrs): not an error */
		acl = NULL;
	} else
		acl = ERR_PTR(-EIO);
err_out:
	kfree(value);
	return acl;
}
/*
 * Populate the inode's cached default and access ACLs from the server.
 * When the mount is not access=client with acl=on, the caches are set
 * to NULL so later lookups fall through to server-side enforcement.
 * Returns 0 on success, -EIO if either fetch failed.
 */
int v9fs_get_acl(struct inode *inode, struct p9_fid *fid)
{
	int retval = 0;
	struct posix_acl *pacl, *dacl;
	struct v9fs_session_info *v9ses;
	v9ses = v9fs_inode2v9ses(inode);
	if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) ||
			((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) {
		set_cached_acl(inode, ACL_TYPE_DEFAULT, NULL);
		set_cached_acl(inode, ACL_TYPE_ACCESS, NULL);
		return 0;
	}
	/* get the default/access acl values and cache them */
	dacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_DEFAULT);
	pacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_ACCESS);
	if (!IS_ERR(dacl) && !IS_ERR(pacl)) {
		/* set_cached_acl takes its own reference */
		set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl);
		set_cached_acl(inode, ACL_TYPE_ACCESS, pacl);
	} else
		retval = -EIO;
	if (!IS_ERR(dacl))
		posix_acl_release(dacl);
	if (!IS_ERR(pacl))
		posix_acl_release(pacl);
	return retval;
}
/*
 * Return the cached ACL of @type for @inode (may be NULL).  9p always
 * caches the acl value when instantiating the inode
 * (v9fs_inode_from_fid), so a cache miss here indicates a bug.
 */
static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type)
{
	struct posix_acl *acl = get_cached_acl(inode, type);

	BUG_ON(acl == ACL_NOT_CACHED);
	return acl;
}
struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type)
{
struct v9fs_session_info *v9ses;
v9ses = v9fs_inode2v9ses(inode);
if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) ||
((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) {
/*
* On access = client and acl = on mode get the acl
* values from the server
*/
return NULL;
}
return v9fs_get_cached_acl(inode, type);
}
/*
 * Encode @acl as a POSIX ACL xattr and send it to the server via a
 * setxattr request on @fid.  A NULL acl is a no-op.  Returns 0 or a
 * negative errno.
 */
static int v9fs_set_acl(struct p9_fid *fid, int type, struct posix_acl *acl)
{
	int retval;
	char *name;
	size_t size;
	void *buffer;
	if (!acl)
		return 0;
	/* Set a setxattr request to server */
	size = posix_acl_xattr_size(acl->a_count);
	buffer = kmalloc(size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	retval = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
	if (retval < 0)
		goto err_free_out;
	switch (type) {
	case ACL_TYPE_ACCESS:
		name = POSIX_ACL_XATTR_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		name = POSIX_ACL_XATTR_DEFAULT;
		break;
	default:
		/* callers only pass the two types above */
		BUG();
	}
	retval = v9fs_fid_xattr_set(fid, name, buffer, size, 0);
err_free_out:
	kfree(buffer);
	return retval;
}
/*
 * Propagate a chmod into the access ACL: rebuild the cached ACL from
 * the new i_mode, refresh the cache and push the result to the server.
 * Symlinks cannot carry ACLs.  Returns 0 or a negative errno.
 */
int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid)
{
	int retval = 0;
	struct posix_acl *acl;
	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;
	acl = v9fs_get_cached_acl(inode, ACL_TYPE_ACCESS);
	if (acl) {
		/* posix_acl_chmod edits the acl in place to match i_mode */
		retval = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
		if (retval)
			return retval;
		set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
		retval = v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl);
		posix_acl_release(acl);
	}
	return retval;
}
/*
 * Install the inherited default and access ACLs on a freshly created
 * inode: cache both locally, then push both to the server.  Either ACL
 * may be NULL.  Always returns 0.
 */
int v9fs_set_create_acl(struct inode *inode, struct p9_fid *fid,
			struct posix_acl *dacl, struct posix_acl *acl)
{
	set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl);
	set_cached_acl(inode, ACL_TYPE_ACCESS, acl);
	v9fs_set_acl(fid, ACL_TYPE_DEFAULT, dacl);
	v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl);
	return 0;
}
/* Drop the references held on a default/access ACL pair (NULLs are
 * fine — posix_acl_release handles them). */
void v9fs_put_acl(struct posix_acl *dacl,
		  struct posix_acl *acl)
{
	posix_acl_release(dacl);
	posix_acl_release(acl);
}
/*
 * Compute the creation mode and inherited ACLs for a new object in
 * @dir per POSIX ACL inheritance: with no default ACL the umask
 * applies; with one, posix_acl_create() derives the mode and the access
 * ACL, and directories additionally inherit the default ACL.  On
 * success *modep is updated and *dpacl/*pacl receive referenced ACLs
 * (or are left untouched).  Returns 0 or a negative errno.
 */
int v9fs_acl_mode(struct inode *dir, umode_t *modep,
		  struct posix_acl **dpacl, struct posix_acl **pacl)
{
	int retval = 0;
	umode_t mode = *modep;
	struct posix_acl *acl = NULL;
	if (!S_ISLNK(mode)) {
		acl = v9fs_get_cached_acl(dir, ACL_TYPE_DEFAULT);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		if (!acl)
			mode &= ~current_umask();
	}
	if (acl) {
		if (S_ISDIR(mode))
			*dpacl = posix_acl_dup(acl);
		retval = posix_acl_create(&acl, GFP_NOFS, &mode);
		if (retval < 0)
			return retval;
		/* retval > 0 means the derived access ACL is non-trivial */
		if (retval > 0)
			*pacl = acl;
		else
			posix_acl_release(acl);
	}
	*modep  = mode;
	return 0;
}
/*
 * Fetch an ACL xattr straight from the server, bypassing the local
 * cache — used when the mount is not access=client so the server is
 * authoritative.
 */
static int v9fs_remote_get_acl(struct dentry *dentry, const char *name,
			       void *buffer, size_t size, int type)
{
	char *full_name;

	if (type == ACL_TYPE_ACCESS)
		full_name = POSIX_ACL_XATTR_ACCESS;
	else if (type == ACL_TYPE_DEFAULT)
		full_name = POSIX_ACL_XATTR_DEFAULT;
	else
		BUG();

	return v9fs_xattr_get(dentry, full_name, buffer, size);
}
/*
 * xattr_handler.get for the ACL attributes.  @name is the suffix after
 * the handler prefix and must be empty.  Without access=client the
 * request is forwarded verbatim to the server; otherwise the cached ACL
 * is serialized into @buffer.  Returns the encoded size, -ENODATA when
 * no ACL is set, or a negative errno.
 */
static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name,
			      void *buffer, size_t size, int type)
{
	struct v9fs_session_info *v9ses;
	struct posix_acl *acl;
	int error;
	if (strcmp(name, "") != 0)
		return -EINVAL;
	v9ses = v9fs_dentry2v9ses(dentry);
	/*
	 * We allow set/get/list of acl when access=client is not specified
	 */
	if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
		return v9fs_remote_get_acl(dentry, name, buffer, size, type);
	acl = v9fs_get_cached_acl(dentry->d_inode, type);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl == NULL)
		return -ENODATA;
	error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
	posix_acl_release(acl);
	return error;
}
/*
 * Push an ACL xattr straight to the server without local validation or
 * caching — used when the mount is not access=client so the server is
 * authoritative.
 */
static int v9fs_remote_set_acl(struct dentry *dentry, const char *name,
			       const void *value, size_t size,
			       int flags, int type)
{
	char *full_name;

	if (type == ACL_TYPE_ACCESS)
		full_name = POSIX_ACL_XATTR_ACCESS;
	else if (type == ACL_TYPE_DEFAULT)
		full_name = POSIX_ACL_XATTR_DEFAULT;
	else
		BUG();

	return v9fs_xattr_set(dentry, full_name, value, size, flags);
}
/*
 * xattr_handler.set for the ACL attributes.  @name must be the empty
 * suffix.  Without access=client the raw value is forwarded to the
 * server unvalidated.  Otherwise the value is decoded and validated,
 * an access ACL that is fully representable by mode bits collapses into
 * a chmod, a default ACL is only allowed on directories, and on success
 * the server xattr and the local cache are both updated.
 */
static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
			      const void *value, size_t size,
			      int flags, int type)
{
	int retval;
	struct posix_acl *acl;
	struct v9fs_session_info *v9ses;
	struct inode *inode = dentry->d_inode;
	if (strcmp(name, "") != 0)
		return -EINVAL;
	v9ses = v9fs_dentry2v9ses(dentry);
	/*
	 * set the attribute on the remote. Without even looking at the
	 * xattr value. We leave it to the server to validate
	 */
	if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT)
		return v9fs_remote_set_acl(dentry, name,
					   value, size, flags, type);
	if (S_ISLNK(inode->i_mode))
		return -EOPNOTSUPP;
	if (!inode_owner_or_capable(inode))
		return -EPERM;
	if (value) {
		/* update the cached acl value */
		acl = posix_acl_from_xattr(&init_user_ns, value, size);
		if (IS_ERR(acl))
			return PTR_ERR(acl);
		else if (acl) {
			retval = posix_acl_valid(acl);
			if (retval)
				goto err_out;
		}
	} else
		acl = NULL;
	switch (type) {
	case ACL_TYPE_ACCESS:
		name = POSIX_ACL_XATTR_ACCESS;
		if (acl) {
			umode_t mode = inode->i_mode;
			retval = posix_acl_equiv_mode(acl, &mode);
			if (retval < 0)
				goto err_out;
			else {
				struct iattr iattr;
				if (retval == 0) {
					/*
					 * ACL can be represented
					 * by the mode bits. So don't
					 * update ACL.
					 */
					acl = NULL;
					value = NULL;
					size = 0;
				}
				/* Update the mode bits */
				iattr.ia_mode = ((mode & S_IALLUGO) |
						 (inode->i_mode & ~S_IALLUGO));
				iattr.ia_valid = ATTR_MODE;
				/* FIXME should we update ctime?
				 * Does the setxattr below also end up
				 * updating the mode on the server?
				 */
				v9fs_vfs_setattr_dotl(dentry, &iattr);
			}
		}
		break;
	case ACL_TYPE_DEFAULT:
		name = POSIX_ACL_XATTR_DEFAULT;
		if (!S_ISDIR(inode->i_mode)) {
			/* setting a default ACL on a non-dir: clearing
			 * (NULL acl) is a no-op, anything else is EINVAL */
			retval = acl ? -EINVAL : 0;
			goto err_out;
		}
		break;
	default:
		BUG();
	}
	retval = v9fs_xattr_set(dentry, name, value, size, flags);
	if (!retval)
		set_cached_acl(inode, type, acl);
err_out:
	posix_acl_release(acl);
	return retval;
}
/* xattr handlers for "system.posix_acl_access" /
 * "system.posix_acl_default"; .flags carries the ACL type so both can
 * share the same get/set implementations. */
const struct xattr_handler v9fs_xattr_acl_access_handler = {
	.prefix	= POSIX_ACL_XATTR_ACCESS,
	.flags	= ACL_TYPE_ACCESS,
	.get	= v9fs_xattr_get_acl,
	.set	= v9fs_xattr_set_acl,
};
const struct xattr_handler v9fs_xattr_acl_default_handler = {
	.prefix	= POSIX_ACL_XATTR_DEFAULT,
	.flags	= ACL_TYPE_DEFAULT,
	.get	= v9fs_xattr_get_acl,
	.set	= v9fs_xattr_set_acl,
};
| gpl-2.0 |
breeze101792/linux-dev | arch/arm/mach-s3c24xx/mach-smdk2413.c | 2202 | 3946 | /* linux/arch/arm/mach-s3c2412/mach-smdk2413.c
*
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* Thanks to Dimity Andric (TomTom) and Steven Ryu (Samsung) for the
* loans of SMDK2413 to work with.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <asm/hardware/iomd.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
//#include <asm/debug-ll.h>
#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
#include <mach/regs-lcd.h>
#include <linux/platform_data/usb-s3c2410_udc.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <mach/fb.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/samsung-time.h>
#include "common.h"
#include "common-smdk.h"
/* No board-specific static I/O mappings beyond the SoC defaults. */
static struct map_desc smdk2413_iodesc[] __initdata = {
};
/* UART setup: ports 0/1 are plain serial consoles, port 2 drives the
 * IR transceiver (note the different ULCON). */
static struct s3c2410_uartcfg smdk2413_uartcfgs[] __initdata = {
	[0] = {
		.hwport	     = 0,
		.flags	     = 0,
		.ucon	     = 0x3c5,
		.ulcon	     = 0x03,
		.ufcon	     = 0x51,
	},
	[1] = {
		.hwport	     = 1,
		.flags	     = 0,
		.ucon	     = 0x3c5,
		.ulcon	     = 0x03,
		.ufcon	     = 0x51,
	},
	/* IR port */
	[2] = {
		.hwport	     = 2,
		.flags	     = 0,
		.ucon	     = 0x3c5,
		.ulcon	     = 0x43,
		.ufcon	     = 0x51,
	}
};
/* USB device controller: D+ pull-up is controlled via GPF2. */
static struct s3c2410_udc_mach_info smdk2413_udc_cfg __initdata = {
	.pullup_pin = S3C2410_GPF(2),
};
/* Platform devices registered unconditionally at machine init. */
static struct platform_device *smdk2413_devices[] __initdata = {
	&s3c_device_ohci,
	&s3c_device_wdt,
	&s3c_device_i2c0,
	&s3c_device_iis,
	&s3c_device_usbgadget,
};
/*
 * ATAG fixup: if the bootloader did not supply a valid tag list at the
 * conventional SDRAM offset, fall back to a hard-coded single 64MiB
 * bank at 0x30000000 (the S3C2410 SDRAM base).
 */
static void __init smdk2413_fixup(struct tag *tags, char **cmdline,
				  struct meminfo *mi)
{
	if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
		mi->nr_banks=1;
		mi->bank[0].start = 0x30000000;
		mi->bank[0].size = SZ_64M;
	}
}
/* Early map_io hook: set up static mappings, clocks (12MHz xtal),
 * UARTs, and pick PWM3/PWM4 as the system timer source. */
static void __init smdk2413_map_io(void)
{
	s3c24xx_init_io(smdk2413_iodesc, ARRAY_SIZE(smdk2413_iodesc));
	s3c24xx_init_clocks(12000000);
	s3c24xx_init_uarts(smdk2413_uartcfgs, ARRAY_SIZE(smdk2413_uartcfgs));
	samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
static void __init smdk2413_machine_init(void)
{	/* Turn off suspend on both USB ports, and switch the
	 * selectable USB port to USB device mode. */
	s3c2410_modify_misccr(S3C2410_MISCCR_USBHOST |
			      S3C2410_MISCCR_USBSUSPND0 |
			      S3C2410_MISCCR_USBSUSPND1, 0x0);
	/* hand platform data to the UDC and I2C drivers, then register
	 * the board's devices and the common SMDK peripherals */
	s3c24xx_udc_set_platdata(&smdk2413_udc_cfg);
	s3c_i2c0_set_platdata(NULL);
	platform_add_devices(smdk2413_devices, ARRAY_SIZE(smdk2413_devices));
	smdk_machine_init();
}
/* Three machine IDs (S3C2413, SMDK2412, SMDK2413) share one identical
 * board description — the hardware differs only in SoC revision. */
MACHINE_START(S3C2413, "S3C2413")
	/* Maintainer: Ben Dooks <ben-linux@fluff.org> */
	.atag_offset	= 0x100,
	.fixup		= smdk2413_fixup,
	.init_irq	= s3c2412_init_irq,
	.map_io		= smdk2413_map_io,
	.init_machine	= smdk2413_machine_init,
	.init_time	= samsung_timer_init,
	.restart	= s3c2412_restart,
MACHINE_END
MACHINE_START(SMDK2412, "SMDK2412")
	/* Maintainer: Ben Dooks <ben-linux@fluff.org> */
	.atag_offset	= 0x100,
	.fixup		= smdk2413_fixup,
	.init_irq	= s3c2412_init_irq,
	.map_io		= smdk2413_map_io,
	.init_machine	= smdk2413_machine_init,
	.init_time	= samsung_timer_init,
	.restart	= s3c2412_restart,
MACHINE_END
MACHINE_START(SMDK2413, "SMDK2413")
	/* Maintainer: Ben Dooks <ben-linux@fluff.org> */
	.atag_offset	= 0x100,
	.fixup		= smdk2413_fixup,
	.init_irq	= s3c2412_init_irq,
	.map_io		= smdk2413_map_io,
	.init_machine	= smdk2413_machine_init,
	.init_time	= samsung_timer_init,
	.restart	= s3c2412_restart,
MACHINE_END
| gpl-2.0 |
christiantroy/linux-allwinner | drivers/input/touchscreen/bu21013_ts.c | 2970 | 18133 | /*
* Copyright (C) ST-Ericsson SA 2010
* Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
* License terms:GNU General Public License (GPL) version 2
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <linux/input.h>
#include <linux/input/bu21013.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
/* Driver tunables and protocol constants. */
#define PEN_DOWN_INTR	0
#define MAX_FINGERS	2
#define RESET_DELAY	30
#define PENUP_TIMEOUT	(10)
#define DELTA_MIN	16
#define MASK_BITS	0x03
#define SHIFT_8		8
#define SHIFT_2		2
#define LENGTH_OF_BUFFER	11
#define I2C_RETRY_COUNT	5
/* Touch data registers: button status, then MSB/LSB coordinate pairs
 * for up to two fingers. */
#define BU21013_SENSORS_BTN_0_7_REG	0x70
#define BU21013_SENSORS_BTN_8_15_REG	0x71
#define BU21013_SENSORS_BTN_16_23_REG	0x72
#define BU21013_X1_POS_MSB_REG		0x73
#define BU21013_X1_POS_LSB_REG		0x74
#define BU21013_Y1_POS_MSB_REG		0x75
#define BU21013_Y1_POS_LSB_REG		0x76
#define BU21013_X2_POS_MSB_REG		0x77
#define BU21013_X2_POS_LSB_REG		0x78
#define BU21013_Y2_POS_MSB_REG		0x79
#define BU21013_Y2_POS_LSB_REG		0x7A
/* Configuration registers. */
#define BU21013_INT_CLR_REG		0xE8
#define BU21013_INT_MODE_REG		0xE9
#define BU21013_GAIN_REG		0xEA
#define BU21013_OFFSET_MODE_REG		0xEB
#define BU21013_XY_EDGE_REG		0xEC
#define BU21013_RESET_REG		0xED
#define BU21013_CALIB_REG		0xEE
#define BU21013_DONE_REG		0xEF
#define BU21013_SENSOR_0_7_REG		0xF0
#define BU21013_SENSOR_8_15_REG		0xF1
#define BU21013_SENSOR_16_23_REG	0xF2
#define BU21013_POS_MODE1_REG		0xF3
#define BU21013_POS_MODE2_REG		0xF4
#define BU21013_CLK_MODE_REG		0xF5
#define BU21013_IDLE_REG		0xFA
#define BU21013_FILTER_REG		0xFB
#define BU21013_TH_ON_REG		0xFC
#define BU21013_TH_OFF_REG		0xFD
/* Per-register bit definitions. */
#define BU21013_RESET_ENABLE		0x01
#define BU21013_SENSORS_EN_0_7		0x3F
#define BU21013_SENSORS_EN_8_15		0xFC
#define BU21013_SENSORS_EN_16_23	0x1F
#define BU21013_POS_MODE1_0		0x02
#define BU21013_POS_MODE1_1		0x04
#define BU21013_POS_MODE1_2		0x08
#define BU21013_POS_MODE2_ZERO		0x01
#define BU21013_POS_MODE2_AVG1		0x02
#define BU21013_POS_MODE2_AVG2		0x04
#define BU21013_POS_MODE2_EN_XY		0x08
#define BU21013_POS_MODE2_EN_RAW	0x10
#define BU21013_POS_MODE2_MULTI		0x80
#define BU21013_CLK_MODE_DIV		0x01
#define BU21013_CLK_MODE_EXT		0x02
#define BU21013_CLK_MODE_CALIB		0x80
#define BU21013_IDLET_0			0x01
#define BU21013_IDLET_1			0x02
#define BU21013_IDLET_2			0x04
#define BU21013_IDLET_3			0x08
#define BU21013_IDLE_INTERMIT_EN	0x10
#define BU21013_DELTA_0_6		0x7F
#define BU21013_FILTER_EN		0x80
#define BU21013_INT_MODE_LEVEL		0x00
#define BU21013_INT_MODE_EDGE		0x01
#define BU21013_GAIN_0			0x01
#define BU21013_GAIN_1			0x02
#define BU21013_GAIN_2			0x04
#define BU21013_OFFSET_MODE_DEFAULT	0x00
#define BU21013_OFFSET_MODE_MOVE	0x01
#define BU21013_OFFSET_MODE_DISABLE	0x02
#define BU21013_TH_ON_0			0x01
#define BU21013_TH_ON_1			0x02
#define BU21013_TH_ON_2			0x04
#define BU21013_TH_ON_3			0x08
#define BU21013_TH_ON_4			0x10
#define BU21013_TH_ON_5			0x20
#define BU21013_TH_ON_6			0x40
#define BU21013_TH_ON_7			0x80
#define BU21013_TH_ON_MAX		0xFF
#define BU21013_TH_OFF_0		0x01
#define BU21013_TH_OFF_1		0x02
#define BU21013_TH_OFF_2		0x04
#define BU21013_TH_OFF_3		0x08
#define BU21013_TH_OFF_4		0x10
#define BU21013_TH_OFF_5		0x20
#define BU21013_TH_OFF_6		0x40
#define BU21013_TH_OFF_7		0x80
#define BU21013_TH_OFF_MAX		0xFF
#define BU21013_X_EDGE_0		0x01
#define BU21013_X_EDGE_1		0x02
#define BU21013_X_EDGE_2		0x04
#define BU21013_X_EDGE_3		0x08
#define BU21013_Y_EDGE_0		0x10
#define BU21013_Y_EDGE_1		0x20
#define BU21013_Y_EDGE_2		0x40
#define BU21013_Y_EDGE_3		0x80
#define BU21013_DONE			0x01
#define BU21013_NUMBER_OF_X_SENSORS	(6)
#define BU21013_NUMBER_OF_Y_SENSORS	(11)
#define DRIVER_TP			"bu21013_tp"
/**
 * struct bu21013_ts_data - touch panel data structure
 * @client: pointer to the i2c client
 * @wait: wait queue the irq thread sleeps on between pen-down polls
 * @touch_stopped: set to stop the irq thread (e.g. on remove/suspend)
 * @chip: pointer to the board's touch panel platform configuration
 * @in_dev: pointer to the input device structure
 * @intr_pin: last value sampled from the interrupt GPIO
 * @regulator: pointer to the Regulator used for touch screen
 *
 * Touch panel device data structure
 */
struct bu21013_ts_data {
	struct i2c_client *client;
	wait_queue_head_t wait;
	bool touch_stopped;
	const struct bu21013_platform_device *chip;
	struct input_dev *in_dev;
	unsigned int intr_pin;
	struct regulator *regulator;
};
/**
 * bu21013_read_block_data(): read the touch co-ordinates
 * @data: bu21013_ts_data structure pointer
 * @buf: destination for LENGTH_OF_BUFFER bytes of touch state
 *
 * Reads the button-status and coordinate registers in one SMBus block
 * transfer starting at BU21013_SENSORS_BTN_0_7_REG, retrying up to
 * I2C_RETRY_COUNT times on short reads.  Returns 0 on success, -EINVAL
 * if every attempt came back short.
 */
static int bu21013_read_block_data(struct bu21013_ts_data *data, u8 *buf)
{
	int attempt;

	for (attempt = 0; attempt < I2C_RETRY_COUNT; attempt++) {
		int got = i2c_smbus_read_i2c_block_data(data->client,
						BU21013_SENSORS_BTN_0_7_REG,
						LENGTH_OF_BUFFER, buf);
		if (got == LENGTH_OF_BUFFER)
			return 0;
	}
	return -EINVAL;
}
/**
 * bu21013_do_touch_report(): Get the touch co-ordinates
 * @data: bu21013_ts_data structure pointer
 *
 * Reads the sensor registers, decodes up to MAX_FINGERS coordinate
 * pairs, applies the board's axis flips and reports them as MT events.
 * Returns 0 on success (including "nothing to report"), -EINVAL on
 * bad input or i2c failure.
 */
static int bu21013_do_touch_report(struct bu21013_ts_data *data)
{
	u8 buf[LENGTH_OF_BUFFER];
	unsigned int pos_x[2], pos_y[2];
	bool has_x_sensors, has_y_sensors;
	int finger_down_count = 0;
	int i;
	if (data == NULL)
		return -EINVAL;
	if (bu21013_read_block_data(data, buf) < 0)
		return -EINVAL;
	/* ignore readings unless sensors on both axes saw a touch */
	has_x_sensors = hweight32(buf[0] & BU21013_SENSORS_EN_0_7);
	has_y_sensors = hweight32(((buf[1] & BU21013_SENSORS_EN_8_15) |
		((buf[2] & BU21013_SENSORS_EN_16_23) << SHIFT_8)) >> SHIFT_2);
	if (!has_x_sensors || !has_y_sensors)
		return 0;
	for (i = 0; i < MAX_FINGERS; i++) {
		const u8 *p = &buf[4 * i + 3];
		/* coordinates are 10-bit: 8 MSBs + 2 LSBs per axis */
		unsigned int x = p[0] << SHIFT_2 | (p[1] & MASK_BITS);
		unsigned int y = p[2] << SHIFT_2 | (p[3] & MASK_BITS);
		if (x == 0 || y == 0)
			continue;
		pos_x[finger_down_count] = x;
		pos_y[finger_down_count] = y;
		finger_down_count++;
	}
	if (finger_down_count) {
		/* two contacts closer than DELTA_MIN on either axis are
		 * indistinguishable — drop the sample */
		if (finger_down_count == 2 &&
		    (abs(pos_x[0] - pos_x[1]) < DELTA_MIN ||
		     abs(pos_y[0] - pos_y[1]) < DELTA_MIN)) {
			return 0;
		}
		for (i = 0; i < finger_down_count; i++) {
			if (data->chip->x_flip)
				pos_x[i] = data->chip->touch_x_max - pos_x[i];
			if (data->chip->y_flip)
				pos_y[i] = data->chip->touch_y_max - pos_y[i];
			input_report_abs(data->in_dev,
					 ABS_MT_POSITION_X, pos_x[i]);
			input_report_abs(data->in_dev,
					 ABS_MT_POSITION_Y, pos_y[i]);
			input_mt_sync(data->in_dev);
		}
	} else
		/* empty MT sync signals "all contacts lifted" */
		input_mt_sync(data->in_dev);
	input_sync(data->in_dev);
	return 0;
}
/**
 * bu21013_gpio_irq() - gpio thread function for touch interrupt
 * @irq: irq value
 * @device_data: driver data (struct bu21013_ts_data), as registered
 *
 * Threaded irq handler: reports touches in a loop, polling roughly
 * every 2ms for as long as the interrupt line stays asserted
 * (pen down) and the driver has not been asked to stop.
 * Returns IRQ_HANDLED, or IRQ_NONE on a read failure.
 */
static irqreturn_t bu21013_gpio_irq(int irq, void *device_data)
{
	struct bu21013_ts_data *data = device_data;
	struct i2c_client *i2c = data->client;
	int retval;
	do {
		retval = bu21013_do_touch_report(data);
		if (retval < 0) {
			dev_err(&i2c->dev, "bu21013_do_touch_report failed\n");
			return IRQ_NONE;
		}
		data->intr_pin = data->chip->irq_read_val();
		/* pen still down: nap briefly (or until stop) and re-poll */
		if (data->intr_pin == PEN_DOWN_INTR)
			wait_event_timeout(data->wait, data->touch_stopped,
					   msecs_to_jiffies(2));
	} while (!data->intr_pin && !data->touch_stopped);
	return IRQ_HANDLED;
}
/**
 * bu21013_init_chip() - power on sequence for the bu21013 controller
 * @data: device structure pointer
 *
 * Runs the register configuration sequence over SMBus: software reset,
 * sensor-channel enables, position/clock/interrupt modes, filtering,
 * touch on/off thresholds, gain, offset mode and edge setup, then
 * latches everything with the DONE register.
 *
 * Returns 0 on success or the negative error from the failing write;
 * later writes are skipped once one fails.
 */
static int bu21013_init_chip(struct bu21013_ts_data *data)
{
	int retval;
	struct i2c_client *i2c = data->client;

	/* Soft-reset the controller, then give it time to come back. */
	retval = i2c_smbus_write_byte_data(i2c, BU21013_RESET_REG,
					BU21013_RESET_ENABLE);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_RESET reg write failed\n");
		return retval;
	}
	msleep(RESET_DELAY);

	/* Enable all three banks of sensor channels (0-7, 8-15, 16-23). */
	retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_0_7_REG,
					BU21013_SENSORS_EN_0_7);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_SENSOR_0_7 reg write failed\n");
		return retval;
	}
	retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_8_15_REG,
					BU21013_SENSORS_EN_8_15);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_SENSOR_8_15 reg write failed\n");
		return retval;
	}
	retval = i2c_smbus_write_byte_data(i2c, BU21013_SENSOR_16_23_REG,
					BU21013_SENSORS_EN_16_23);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_SENSOR_16_23 reg write failed\n");
		return retval;
	}

	/* Position reporting mode, including multi-touch (POS_MODE2_MULTI). */
	retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE1_REG,
					(BU21013_POS_MODE1_0 | BU21013_POS_MODE1_1));
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_POS_MODE1 reg write failed\n");
		return retval;
	}
	retval = i2c_smbus_write_byte_data(i2c, BU21013_POS_MODE2_REG,
				(BU21013_POS_MODE2_ZERO | BU21013_POS_MODE2_AVG1 |
				BU21013_POS_MODE2_AVG2 | BU21013_POS_MODE2_EN_RAW |
				BU21013_POS_MODE2_MULTI));
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_POS_MODE2 reg write failed\n");
		return retval;
	}

	/*
	 * Clock source per platform data: external clock if provided,
	 * internal divider otherwise; calibration enabled either way.
	 */
	if (data->chip->ext_clk)
		retval = i2c_smbus_write_byte_data(i2c, BU21013_CLK_MODE_REG,
					(BU21013_CLK_MODE_EXT | BU21013_CLK_MODE_CALIB));
	else
		retval = i2c_smbus_write_byte_data(i2c, BU21013_CLK_MODE_REG,
					(BU21013_CLK_MODE_DIV | BU21013_CLK_MODE_CALIB));
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_CLK_MODE reg write failed\n");
		return retval;
	}

	/* Intermittent idle operation to save power between scans. */
	retval = i2c_smbus_write_byte_data(i2c, BU21013_IDLE_REG,
				(BU21013_IDLET_0 | BU21013_IDLE_INTERMIT_EN));
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_IDLE reg write failed\n");
		return retval;
	}

	/* Level-triggered interrupt output (matches the polling handler). */
	retval = i2c_smbus_write_byte_data(i2c, BU21013_INT_MODE_REG,
						BU21013_INT_MODE_LEVEL);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_INT_MODE reg write failed\n");
		return retval;
	}

	/* Input filtering and touch detect thresholds (on/off hysteresis). */
	retval = i2c_smbus_write_byte_data(i2c, BU21013_FILTER_REG,
						(BU21013_DELTA_0_6 |
							BU21013_FILTER_EN));
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_FILTER reg write failed\n");
		return retval;
	}
	retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_ON_REG,
					BU21013_TH_ON_5);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_TH_ON reg write failed\n");
		return retval;
	}
	retval = i2c_smbus_write_byte_data(i2c, BU21013_TH_OFF_REG,
				BU21013_TH_OFF_4 | BU21013_TH_OFF_3);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_TH_OFF reg write failed\n");
		return retval;
	}

	/* Sensor gain, offset compensation mode and panel edge handling. */
	retval = i2c_smbus_write_byte_data(i2c, BU21013_GAIN_REG,
					(BU21013_GAIN_0 | BU21013_GAIN_1));
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_GAIN reg write failed\n");
		return retval;
	}
	retval = i2c_smbus_write_byte_data(i2c, BU21013_OFFSET_MODE_REG,
					BU21013_OFFSET_MODE_DEFAULT);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_OFFSET_MODE reg write failed\n");
		return retval;
	}
	retval = i2c_smbus_write_byte_data(i2c, BU21013_XY_EDGE_REG,
				(BU21013_X_EDGE_0 | BU21013_X_EDGE_2 |
				BU21013_Y_EDGE_1 | BU21013_Y_EDGE_3));
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_XY_EDGE reg write failed\n");
		return retval;
	}

	/* Latch the whole configuration into the controller. */
	retval = i2c_smbus_write_byte_data(i2c, BU21013_DONE_REG,
							BU21013_DONE);
	if (retval < 0) {
		dev_err(&i2c->dev, "BU21013_REG_DONE reg write failed\n");
		return retval;
	}

	return 0;
}
/**
 * bu21013_free_irq() - frees IRQ registered for touchscreen
 * @bu21013_data: device structure pointer
 *
 * Signals the threaded IRQ handler to stop (touch_stopped + wake_up so
 * any handler sleeping in wait_event_timeout exits its loop), then frees
 * the interrupt. Ordering matters: the flag must be visible and the
 * waiter woken before free_irq() blocks waiting for the handler.
 */
static void bu21013_free_irq(struct bu21013_ts_data *bu21013_data)
{
	bu21013_data->touch_stopped = true;
	wake_up(&bu21013_data->wait);
	free_irq(bu21013_data->chip->irq, bu21013_data);
}
/**
 * bu21013_probe() - initializes the i2c-client touchscreen driver
 * @client: i2c client structure pointer
 * @id: i2c device id pointer
 *
 * Allocates driver and input-device state, powers the touch panel via
 * its regulator, runs the controller init sequence, registers the input
 * device and requests the threaded touch IRQ.
 *
 * The chip-select callbacks in platform data are optional: cs_en is
 * only invoked when non-NULL, and the error path mirrors that for
 * cs_dis (the original code dereferenced cs_dis unconditionally, which
 * oopses on platforms that leave the callbacks unset).
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit bu21013_probe(struct i2c_client *client,
					const struct i2c_device_id *id)
{
	struct bu21013_ts_data *bu21013_data;
	struct input_dev *in_dev;
	const struct bu21013_platform_device *pdata =
					client->dev.platform_data;
	int error;

	if (!i2c_check_functionality(client->adapter,
					I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_err(&client->dev, "i2c smbus byte data not supported\n");
		return -EIO;
	}

	if (!pdata) {
		dev_err(&client->dev, "platform data not defined\n");
		return -EINVAL;
	}

	bu21013_data = kzalloc(sizeof(struct bu21013_ts_data), GFP_KERNEL);
	in_dev = input_allocate_device();
	if (!bu21013_data || !in_dev) {
		dev_err(&client->dev, "device memory alloc failed\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	bu21013_data->in_dev = in_dev;
	bu21013_data->chip = pdata;
	bu21013_data->client = client;

	bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
	if (IS_ERR(bu21013_data->regulator)) {
		dev_err(&client->dev, "regulator_get failed\n");
		error = PTR_ERR(bu21013_data->regulator);
		goto err_free_mem;
	}

	error = regulator_enable(bu21013_data->regulator);
	if (error < 0) {
		dev_err(&client->dev, "regulator enable failed\n");
		goto err_put_regulator;
	}

	bu21013_data->touch_stopped = false;
	init_waitqueue_head(&bu21013_data->wait);

	/* configure the gpio pins (chip-select callback is optional) */
	if (pdata->cs_en) {
		error = pdata->cs_en(pdata->cs_pin);
		if (error < 0) {
			dev_err(&client->dev, "chip init failed\n");
			goto err_disable_regulator;
		}
	}

	/* configure the touch panel controller */
	error = bu21013_init_chip(bu21013_data);
	if (error) {
		dev_err(&client->dev, "error in bu21013 config\n");
		goto err_cs_disable;
	}

	/* register the device to input subsystem */
	in_dev->name = DRIVER_TP;
	in_dev->id.bustype = BUS_I2C;
	in_dev->dev.parent = &client->dev;

	__set_bit(EV_SYN, in_dev->evbit);
	__set_bit(EV_KEY, in_dev->evbit);
	__set_bit(EV_ABS, in_dev->evbit);

	input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
						pdata->touch_x_max, 0, 0);
	input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
						pdata->touch_y_max, 0, 0);
	input_set_drvdata(in_dev, bu21013_data);

	/* threaded handler only: the line is polled inside the thread */
	error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
				     IRQF_TRIGGER_FALLING | IRQF_SHARED,
				     DRIVER_TP, bu21013_data);
	if (error) {
		dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
		goto err_cs_disable;
	}

	error = input_register_device(in_dev);
	if (error) {
		dev_err(&client->dev, "failed to register input device\n");
		goto err_free_irq;
	}

	device_init_wakeup(&client->dev, pdata->wakeup);
	i2c_set_clientdata(client, bu21013_data);

	return 0;

err_free_irq:
	bu21013_free_irq(bu21013_data);
err_cs_disable:
	/* cs_dis is optional, like cs_en above; don't dereference NULL */
	if (pdata->cs_dis)
		pdata->cs_dis(pdata->cs_pin);
err_disable_regulator:
	regulator_disable(bu21013_data->regulator);
err_put_regulator:
	regulator_put(bu21013_data->regulator);
err_free_mem:
	input_free_device(in_dev);
	kfree(bu21013_data);

	return error;
}
/**
 * bu21013_remove() - removes the i2c-client touchscreen driver
 * @client: i2c client structure pointer
 *
 * Tears probe down in reverse: stop and free the IRQ, release the
 * (optional) chip-select, unregister the input device, power off and
 * release the regulator, free driver state and clear the wakeup flag.
 *
 * The cs_dis callback is NULL-checked to match probe, which treats the
 * chip-select callbacks as optional (the original code dereferenced it
 * unconditionally).
 *
 * Returns 0.
 */
static int __devexit bu21013_remove(struct i2c_client *client)
{
	struct bu21013_ts_data *bu21013_data = i2c_get_clientdata(client);

	bu21013_free_irq(bu21013_data);

	if (bu21013_data->chip->cs_dis)
		bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);

	input_unregister_device(bu21013_data->in_dev);

	regulator_disable(bu21013_data->regulator);
	regulator_put(bu21013_data->regulator);

	kfree(bu21013_data);

	device_init_wakeup(&client->dev, false);

	return 0;
}
#ifdef CONFIG_PM
/**
 * bu21013_suspend() - suspend the touch screen controller
 * @dev: pointer to device structure
 *
 * Stops the IRQ thread loop, then either arms the IRQ as a wakeup
 * source (when the device may wake the system) or disables it, and
 * powers down the panel regulator.
 *
 * Returns 0.
 */
static int bu21013_suspend(struct device *dev)
{
	struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
	struct i2c_client *client = bu21013_data->client;

	/* Make the threaded handler exit its polling loop. */
	bu21013_data->touch_stopped = true;
	if (device_may_wakeup(&client->dev))
		enable_irq_wake(bu21013_data->chip->irq);
	else
		disable_irq(bu21013_data->chip->irq);

	regulator_disable(bu21013_data->regulator);

	return 0;
}
/**
 * bu21013_resume() - resume the touch screen controller
 * @dev: pointer to device structure
 *
 * Mirrors suspend: re-enables the panel regulator, replays the full
 * chip init sequence (the controller loses its configuration while
 * unpowered), clears touch_stopped and rebalances the IRQ wake/disable
 * state set in bu21013_suspend().
 *
 * Returns 0 on success or a negative error from the regulator or the
 * chip re-initialization.
 */
static int bu21013_resume(struct device *dev)
{
	struct bu21013_ts_data *bu21013_data = dev_get_drvdata(dev);
	struct i2c_client *client = bu21013_data->client;
	int retval;

	retval = regulator_enable(bu21013_data->regulator);
	if (retval < 0) {
		dev_err(&client->dev, "bu21013 regulator enable failed\n");
		return retval;
	}

	/* Reprogram every register; power was cut during suspend. */
	retval = bu21013_init_chip(bu21013_data);
	if (retval < 0) {
		dev_err(&client->dev, "bu21013 controller config failed\n");
		return retval;
	}

	bu21013_data->touch_stopped = false;

	if (device_may_wakeup(&client->dev))
		disable_irq_wake(bu21013_data->chip->irq);
	else
		enable_irq(bu21013_data->chip->irq);

	return 0;
}
/* System sleep callbacks; only compiled in with CONFIG_PM. */
static const struct dev_pm_ops bu21013_dev_pm_ops = {
	.suspend = bu21013_suspend,
	.resume = bu21013_resume,
};
#endif

/* I2C device id table; DRIVER_TP doubles as driver and device name. */
static const struct i2c_device_id bu21013_id[] = {
	{ DRIVER_TP, 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, bu21013_id);

static struct i2c_driver bu21013_driver = {
	.driver	= {
		.name	=	DRIVER_TP,
		.owner	=	THIS_MODULE,
#ifdef CONFIG_PM
		.pm	=	&bu21013_dev_pm_ops,
#endif
	},
	.probe		=	bu21013_probe,
	.remove		=	__devexit_p(bu21013_remove),
	.id_table	=	bu21013_id,
};

/**
 * bu21013_init() - initializes the bu21013 touchscreen driver
 *
 * Registers the I2C driver with the core.
 * NOTE(review): this init/exit pair is plain i2c_add_driver /
 * i2c_del_driver boilerplate; module_i2c_driver() could replace it if
 * the target kernel provides that macro — confirm before converting.
 */
static int __init bu21013_init(void)
{
	return i2c_add_driver(&bu21013_driver);
}

/**
 * bu21013_exit() - de-initializes the bu21013 touchscreen driver
 *
 * Unregisters the I2C driver from the core.
 */
static void __exit bu21013_exit(void)
{
	i2c_del_driver(&bu21013_driver);
}

module_init(bu21013_init);
module_exit(bu21013_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Naveen Kumar G <naveen.gaddipati@stericsson.com>");
MODULE_DESCRIPTION("bu21013 touch screen controller driver");
| gpl-2.0 |
T-Macgnolia/android_kernel_lge_g4stylus-stock | arch/sh/kernel/sh_ksyms_64.c | 4506 | 1488 | /*
* arch/sh/kernel/sh_ksyms_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/screen_info.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
/* uaccess put_user/get_user assembly helpers */
EXPORT_SYMBOL(__put_user_asm_b);
EXPORT_SYMBOL(__put_user_asm_w);
EXPORT_SYMBOL(__put_user_asm_l);
EXPORT_SYMBOL(__put_user_asm_q);
EXPORT_SYMBOL(__get_user_asm_b);
EXPORT_SYMBOL(__get_user_asm_w);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_q);
EXPORT_SYMBOL(__clear_user);

/* page and user-space copy primitives */
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(empty_zero_page);

/* mem/string routines used by modules */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);

/* calibrated delay loops */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcpy);

/* Ugh. These come in from libgcc.a at link time. */
#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)

DECLARE_EXPORT(__sdivsi3);
DECLARE_EXPORT(__sdivsi3_1);
DECLARE_EXPORT(__sdivsi3_2);
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__div_table);
| gpl-2.0 |
Galaxy-Tab-S2/android_kernel_samsung_gts210wifi | arch/arm/mach-imx/devices/platform-mxc_rtc.c | 4762 | 1247 | /*
* Copyright (C) 2010-2011 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
#include "../hardware.h"
#include "devices-common.h"
/*
 * Build a per-SoC RTC description: the register base and interrupt are
 * pasted together from the SoC prefix (e.g. MX31_RTC_BASE_ADDR).
 */
#define imx_mxc_rtc_data_entry_single(soc, _devid)	\
	{						\
		.devid = _devid,			\
		.iobase = soc ## _RTC_BASE_ADDR,	\
		.irq = soc ## _INT_RTC,			\
	}

#ifdef CONFIG_SOC_IMX31
const struct imx_mxc_rtc_data imx31_mxc_rtc_data __initconst =
	imx_mxc_rtc_data_entry_single(MX31, "imx21-rtc");
#endif /* ifdef CONFIG_SOC_IMX31 */

#ifdef CONFIG_SOC_IMX35
const struct imx_mxc_rtc_data imx35_mxc_rtc_data __initconst =
	imx_mxc_rtc_data_entry_single(MX35, "imx21-rtc");
#endif /* ifdef CONFIG_SOC_IMX35 */

/*
 * Register the MXC RTC platform device for @data: one 16K MMIO window
 * plus its interrupt, with no platform data attached.
 */
struct platform_device *__init imx_add_mxc_rtc(
		const struct imx_mxc_rtc_data *data)
{
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + SZ_16K - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};

	return imx_add_platform_device(data->devid, -1,
			res, ARRAY_SIZE(res), NULL, 0);
}
| gpl-2.0 |
drod2169/Linux-Kernel | arch/arm/mach-imx/devices/platform-imx21-hcd.c | 4762 | 1127 | /*
* Copyright (C) 2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
#include "../hardware.h"
#include "devices-common.h"
/*
 * Build a per-SoC USB host controller description from the SoC prefix
 * (e.g. MX21_USBOTG_BASE_ADDR / MX21_INT_USBHOST).
 */
#define imx_imx21_hcd_data_entry_single(soc)			\
	{							\
		.iobase = soc ## _USBOTG_BASE_ADDR,		\
		.irq = soc ## _INT_USBHOST,			\
	}

#ifdef CONFIG_SOC_IMX21
const struct imx_imx21_hcd_data imx21_imx21_hcd_data __initconst =
	imx_imx21_hcd_data_entry_single(MX21);
#endif /* ifdef CONFIG_SOC_IMX21 */

/*
 * Register the imx21-hcd platform device: an 8K MMIO window plus its
 * interrupt, carrying @pdata and a 32-bit DMA mask (the controller
 * does DMA, unlike the RTC-style helpers that pass no mask).
 */
struct platform_device *__init imx_add_imx21_hcd(
		const struct imx_imx21_hcd_data *data,
		const struct mx21_usbh_platform_data *pdata)
{
	struct resource res[] = {
		{
			.start = data->iobase,
			.end = data->iobase + SZ_8K - 1,
			.flags = IORESOURCE_MEM,
		}, {
			.start = data->irq,
			.end = data->irq,
			.flags = IORESOURCE_IRQ,
		},
	};

	return imx_add_platform_device_dmamask("imx21-hcd", 0,
			res, ARRAY_SIZE(res),
			pdata, sizeof(*pdata), DMA_BIT_MASK(32));
}
| gpl-2.0 |
Surge1223/kernel_samsung-jfltevzw-VRUFNC5 | sound/synth/emux/emux_nrpn.c | 14234 | 9938 | /*
* NRPN / SYSEX callbacks for Emu8k/Emu10k1
*
* Copyright (c) 1999-2000 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include "emux_voice.h"
#include <sound/asoundef.h>
/*
 * conversion from NRPN/control parameters to Emu8000 raw parameters
 */

/* NRPN / CC -> Emu8000 parameter converter */
struct nrpn_conv_table {
	int control;		/* incoming NRPN LSB / CC number */
	int effect;		/* target EMUX_FX_* effect id */
	int (*convert)(int val);	/* value translation for the chip */
};

/* indices into the gs_sense[]/xg_sense[] sensitivity tables below */
#define FX_CUTOFF	0
#define FX_RESONANCE	1
#define FX_ATTACK	2
#define FX_RELEASE	3
#define FX_VIBRATE	4
#define FX_VIBDEPTH	5
#define FX_VIBDELAY	6
#define FX_NUMS		7
/*
 * Look up @type in a converter table, translate @val through the entry's
 * convert() callback and forward the result to snd_emux_send_effect().
 * Returns 1 if the control was found and dispatched, 0 otherwise.
 */
static int send_converted_effect(struct nrpn_conv_table *table, int num_tables,
				 struct snd_emux_port *port,
				 struct snd_midi_channel *chan,
				 int type, int val, int mode)
{
	int i, cval;

	/* Linear scan is fine: the tables hold at most a few dozen rows. */
	for (i = 0; i < num_tables; i++) {
		if (table[i].control == type) {
			cval = table[i].convert(val);
			snd_emux_send_effect(port, chan, table[i].effect,
					     cval, mode);
			return 1;
		}
	}
	return 0;
}
/* default sensitivities shared by the GS and XG tables below */
#define DEF_FX_CUTOFF		170
#define DEF_FX_RESONANCE	6
#define DEF_FX_ATTACK		50
#define DEF_FX_RELEASE		50
#define DEF_FX_VIBRATE		30
#define DEF_FX_VIBDEPTH		4
#define DEF_FX_VIBDELAY		1500

/* effect sensitivities for GS NRPN:
 * adjusted for chaos 8MB soundfonts
 */
static int gs_sense[] =
{
	DEF_FX_CUTOFF, DEF_FX_RESONANCE, DEF_FX_ATTACK, DEF_FX_RELEASE,
	DEF_FX_VIBRATE, DEF_FX_VIBDEPTH, DEF_FX_VIBDELAY
};

/* effect sensitivies for XG controls:
 * adjusted for chaos 8MB soundfonts
 */
static int xg_sense[] =
{
	DEF_FX_CUTOFF, DEF_FX_RESONANCE, DEF_FX_ATTACK, DEF_FX_RELEASE,
	DEF_FX_VIBRATE, DEF_FX_VIBDEPTH, DEF_FX_VIBDELAY
};

/*
 * AWE32 NRPN effects
 */

/* forward declarations for the shared converter helpers */
static int fx_delay(int val);
static int fx_attack(int val);
static int fx_hold(int val);
static int fx_decay(int val);
static int fx_the_value(int val);
static int fx_twice_value(int val);
static int fx_conv_pitch(int val);
static int fx_conv_Q(int val);

/* Per-NRPN aliases onto the shared converters above. */
/* function for each NRPN */		/* [range]  units */
#define fx_env1_delay	fx_delay	/* [0,5900] 4msec */
#define fx_env1_attack	fx_attack	/* [0,5940] 1msec */
#define fx_env1_hold	fx_hold		/* [0,8191] 1msec */
#define fx_env1_decay	fx_decay	/* [0,5940] 4msec */
#define fx_env1_release	fx_decay	/* [0,5940] 4msec */
#define fx_env1_sustain	fx_the_value	/* [0,127] 0.75dB */
#define fx_env1_pitch	fx_the_value	/* [-127,127] 9.375cents */
#define fx_env1_cutoff	fx_the_value	/* [-127,127] 56.25cents */
#define fx_env2_delay	fx_delay	/* [0,5900] 4msec */
#define fx_env2_attack	fx_attack	/* [0,5940] 1msec */
#define fx_env2_hold	fx_hold		/* [0,8191] 1msec */
#define fx_env2_decay	fx_decay	/* [0,5940] 4msec */
#define fx_env2_release	fx_decay	/* [0,5940] 4msec */
#define fx_env2_sustain	fx_the_value	/* [0,127] 0.75dB */
#define fx_lfo1_delay	fx_delay	/* [0,5900] 4msec */
#define fx_lfo1_freq	fx_twice_value	/* [0,127] 84mHz */
#define fx_lfo1_volume	fx_twice_value	/* [0,127] 0.1875dB */
#define fx_lfo1_pitch	fx_the_value	/* [-127,127] 9.375cents */
#define fx_lfo1_cutoff	fx_twice_value	/* [-64,63] 56.25cents */
#define fx_lfo2_delay	fx_delay	/* [0,5900] 4msec */
#define fx_lfo2_freq	fx_twice_value	/* [0,127] 84mHz */
#define fx_lfo2_pitch	fx_the_value	/* [-127,127] 9.375cents */
#define fx_init_pitch	fx_conv_pitch	/* [-8192,8192] cents */
#define fx_chorus	fx_the_value	/* [0,255] -- */
#define fx_reverb	fx_the_value	/* [0,255] -- */
#define fx_cutoff	fx_twice_value	/* [0,127] 62Hz */
#define fx_filterQ	fx_conv_Q	/* [0,127] -- */
/* Envelope/LFO timing converters: defer to the soundfont library's
 * calc helpers and truncate the result to 16 bits. */
static int fx_delay(int val)
{
	return (unsigned short)snd_sf_calc_parm_delay(val);
}

static int fx_attack(int val)
{
	return (unsigned short)snd_sf_calc_parm_attack(val);
}

static int fx_hold(int val)
{
	return (unsigned short)snd_sf_calc_parm_hold(val);
}

static int fx_decay(int val)
{
	return (unsigned short)snd_sf_calc_parm_decay(val);
}
/* Pass the value through, masked to one byte. */
static int fx_the_value(int val)
{
	unsigned short byteval = val & 0xff;

	return byteval;
}

/* Double the value, then mask to one byte. */
static int fx_twice_value(int val)
{
	unsigned short doubled = (val * 2) & 0xff;

	return doubled;
}

/* Convert cents into 1/4096-octave pitch units (sign preserved). */
static int fx_conv_pitch(int val)
{
	short pitch = val * 4096 / 1200;

	return pitch;
}

/* Scale the Q value down by 8 and mask to one byte. */
static int fx_conv_Q(int val)
{
	unsigned short q = (val / 8) & 0xff;

	return q;
}
/* AWE32 NRPN LSB (0..26) -> emux effect + converter mapping. */
static struct nrpn_conv_table awe_effects[] =
{
	{ 0, EMUX_FX_LFO1_DELAY,	fx_lfo1_delay},
	{ 1, EMUX_FX_LFO1_FREQ,		fx_lfo1_freq},
	{ 2, EMUX_FX_LFO2_DELAY,	fx_lfo2_delay},
	{ 3, EMUX_FX_LFO2_FREQ,		fx_lfo2_freq},
	{ 4, EMUX_FX_ENV1_DELAY,	fx_env1_delay},
	{ 5, EMUX_FX_ENV1_ATTACK,fx_env1_attack},
	{ 6, EMUX_FX_ENV1_HOLD,		fx_env1_hold},
	{ 7, EMUX_FX_ENV1_DECAY,	fx_env1_decay},
	{ 8, EMUX_FX_ENV1_SUSTAIN,	fx_env1_sustain},
	{ 9, EMUX_FX_ENV1_RELEASE,	fx_env1_release},
	{10, EMUX_FX_ENV2_DELAY,	fx_env2_delay},
	{11, EMUX_FX_ENV2_ATTACK,	fx_env2_attack},
	{12, EMUX_FX_ENV2_HOLD,		fx_env2_hold},
	{13, EMUX_FX_ENV2_DECAY,	fx_env2_decay},
	{14, EMUX_FX_ENV2_SUSTAIN,	fx_env2_sustain},
	{15, EMUX_FX_ENV2_RELEASE,	fx_env2_release},
	{16, EMUX_FX_INIT_PITCH,	fx_init_pitch},
	{17, EMUX_FX_LFO1_PITCH,	fx_lfo1_pitch},
	{18, EMUX_FX_LFO2_PITCH,	fx_lfo2_pitch},
	{19, EMUX_FX_ENV1_PITCH,	fx_env1_pitch},
	{20, EMUX_FX_LFO1_VOLUME,	fx_lfo1_volume},
	{21, EMUX_FX_CUTOFF,		fx_cutoff},
	{22, EMUX_FX_FILTERQ,		fx_filterQ},
	{23, EMUX_FX_LFO1_CUTOFF,	fx_lfo1_cutoff},
	{24, EMUX_FX_ENV1_CUTOFF,	fx_env1_cutoff},
	{25, EMUX_FX_CHORUS,		fx_chorus},
	{26, EMUX_FX_REVERB,		fx_reverb},
};
/*
 * GS(SC88) NRPN effects; still experimental
 *
 * Each converter centers the 7-bit MIDI value around 64 and scales it
 * by the matching gs_sense[] entry; time-based parameters invert the
 * sign so larger values shorten the phase.
 */

/* cutoff: quarter semitone step, max=255 */
static int gs_cutoff(int val)
{
	int bias = val - 64;

	return bias * gs_sense[FX_CUTOFF] / 50;
}

/* resonance: 0 to 15(max) */
static int gs_filterQ(int val)
{
	int bias = val - 64;

	return bias * gs_sense[FX_RESONANCE] / 50;
}

/* attack: */
static int gs_attack(int val)
{
	int bias = 64 - val;	/* inverted sense */

	return bias * gs_sense[FX_ATTACK] / 50;
}

/* decay: */
static int gs_decay(int val)
{
	int bias = 64 - val;	/* inverted sense */

	return bias * gs_sense[FX_RELEASE] / 50;
}

/* release: */
static int gs_release(int val)
{
	int bias = 64 - val;	/* inverted sense */

	return bias * gs_sense[FX_RELEASE] / 50;
}

/* vibrato freq: 0.042Hz step, max=255 */
static int gs_vib_rate(int val)
{
	int bias = val - 64;

	return bias * gs_sense[FX_VIBRATE] / 50;
}

/* vibrato depth: max=127, 1 octave */
static int gs_vib_depth(int val)
{
	int bias = val - 64;

	return bias * gs_sense[FX_VIBDEPTH] / 50;
}

/* vibrato delay: -0.725msec step */
static int gs_vib_delay(int val)
{
	int bias = 64 - val;	/* inverted sense */

	return bias * gs_sense[FX_VIBDELAY] / 50;
}
/* GS NRPN LSB -> emux effect + converter mapping. */
static struct nrpn_conv_table gs_effects[] =
{
	{32, EMUX_FX_CUTOFF,	gs_cutoff},
	{33, EMUX_FX_FILTERQ,	gs_filterQ},
	{99, EMUX_FX_ENV2_ATTACK, gs_attack},
	{100, EMUX_FX_ENV2_DECAY, gs_decay},
	{102, EMUX_FX_ENV2_RELEASE, gs_release},
	{8, EMUX_FX_LFO1_FREQ, gs_vib_rate},
	{9, EMUX_FX_LFO1_VOLUME, gs_vib_depth},
	{10, EMUX_FX_LFO1_DELAY, gs_vib_delay},
};
/*
 * NRPN events
 *
 * Dispatch a received NRPN to either the AWE32-specific table
 * (NRPN MSB == 127, LSB <= 26; absolute 14-bit value centred on 8192)
 * or, in GS MIDI mode, to the GS table (NRPN MSB == 1; 7-bit MSB-only
 * value applied additively).
 */
void
snd_emux_nrpn(void *p, struct snd_midi_channel *chan,
	      struct snd_midi_channel_set *chset)
{
	struct snd_emux_port *port;

	port = p;
	if (snd_BUG_ON(!port || !chan))
		return;

	if (chan->control[MIDI_CTL_NONREG_PARM_NUM_MSB] == 127 &&
	    chan->control[MIDI_CTL_NONREG_PARM_NUM_LSB] <= 26) {
		int val;
		/* Win/DOS AWE32 specific NRPNs */
		/* both MSB/LSB necessary */
		val = (chan->control[MIDI_CTL_MSB_DATA_ENTRY] << 7) |
			chan->control[MIDI_CTL_LSB_DATA_ENTRY];
		val -= 8192;	/* re-centre the 14-bit value around zero */
		send_converted_effect
			(awe_effects, ARRAY_SIZE(awe_effects),
			 port, chan, chan->control[MIDI_CTL_NONREG_PARM_NUM_LSB],
			 val, EMUX_FX_FLAG_SET);
		return;
	}

	if (port->chset.midi_mode == SNDRV_MIDI_MODE_GS &&
	    chan->control[MIDI_CTL_NONREG_PARM_NUM_MSB] == 1) {
		int val;
		/* GS specific NRPNs */
		/* only MSB is valid */
		val = chan->control[MIDI_CTL_MSB_DATA_ENTRY];
		send_converted_effect
			(gs_effects, ARRAY_SIZE(gs_effects),
			 port, chan, chan->control[MIDI_CTL_NONREG_PARM_NUM_LSB],
			 val, EMUX_FX_FLAG_ADD);
		return;
	}
}
/*
 * XG control effects; still experimental
 *
 * Like the GS converters, but scaled by xg_sense[] with a divisor of
 * 64; timing parameters invert the sign of the centred value.
 */

/* cutoff: quarter semitone step, max=255 */
static int xg_cutoff(int val)
{
	int bias = val - 64;

	return bias * xg_sense[FX_CUTOFF] / 64;
}

/* resonance: 0(open) to 15(most nasal) */
static int xg_filterQ(int val)
{
	int bias = val - 64;

	return bias * xg_sense[FX_RESONANCE] / 64;
}

/* attack: */
static int xg_attack(int val)
{
	int bias = 64 - val;	/* inverted sense */

	return bias * xg_sense[FX_ATTACK] / 64;
}

/* release: */
static int xg_release(int val)
{
	int bias = 64 - val;	/* inverted sense */

	return bias * xg_sense[FX_RELEASE] / 64;
}
/* XG controller number -> emux effect + converter mapping. */
static struct nrpn_conv_table xg_effects[] =
{
	{71, EMUX_FX_CUTOFF,	xg_cutoff},
	{74, EMUX_FX_FILTERQ,	xg_filterQ},
	{72, EMUX_FX_ENV2_RELEASE, xg_release},
	{73, EMUX_FX_ENV2_ATTACK, xg_attack},
};
/*
 * Handle an XG control change: translate controller @param's current
 * value through xg_effects[] and apply it additively.
 * Returns 1 if the controller was recognized, 0 otherwise.
 */
int
snd_emux_xg_control(struct snd_emux_port *port, struct snd_midi_channel *chan,
		    int param)
{
	return send_converted_effect(xg_effects, ARRAY_SIZE(xg_effects),
				     port, chan, param,
				     chan->control[param],
				     EMUX_FX_FLAG_ADD);
}
/*
 * receive sysex
 *
 * GS master-volume messages trigger a port volume update; everything
 * else is handed to the chip-specific ops.sysex hook when present.
 */
void
snd_emux_sysex(void *p, unsigned char *buf, int len, int parsed,
	       struct snd_midi_channel_set *chset)
{
	struct snd_emux_port *port;
	struct snd_emux *emu;

	port = p;
	if (snd_BUG_ON(!port || !chset))
		return;
	emu = port->emu;

	switch (parsed) {
	case SNDRV_MIDI_SYSEX_GS_MASTER_VOLUME:
		snd_emux_update_port(port, SNDRV_EMUX_UPDATE_VOLUME);
		break;
	default:
		/* fall back to the hardware driver's sysex handler */
		if (emu->ops.sysex)
			emu->ops.sysex(emu, buf, len, parsed, chset);
		break;
	}
}
| gpl-2.0 |
aznrice/l-preview | sound/synth/emux/emux_effect.c | 14746 | 9584 | /*
* Midi synth routines for the Emu8k/Emu10k1
*
* Copyright (C) 1999 Steve Ratcliffe
* Copyright (c) 1999-2000 Takashi Iwai <tiwai@suse.de>
*
* Contains code based on awe_wave.c by Takashi Iwai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include "emux_voice.h"
#include <linux/slab.h>
#ifdef SNDRV_EMUX_USE_RAW_EFFECT
/*
 * effects table
 */

/*
 * NOTE(review): hand-rolled offsetof() built on NULL pointer
 * arithmetic; the standard offsetof() would express the same thing
 * without implementation-defined behavior — confirm header
 * availability before converting.
 */
#define xoffsetof(type,tag)	((long)(&((type)NULL)->tag) - (long)(NULL))

#define parm_offset(tag)	xoffsetof(struct soundfont_voice_parm *, tag)

/* layout flags: field width, which byte of the word, and signedness */
#define PARM_IS_BYTE		(1 << 0)
#define PARM_IS_WORD		(1 << 1)
#define PARM_IS_ALIGNED		(3 << 2)
#define PARM_IS_ALIGN_HI	(1 << 2)
#define PARM_IS_ALIGN_LO	(2 << 2)
#define PARM_IS_SIGNED		(1 << 4)

#define PARM_WORD	(PARM_IS_WORD)
#define PARM_BYTE_LO	(PARM_IS_BYTE|PARM_IS_ALIGN_LO)
#define PARM_BYTE_HI	(PARM_IS_BYTE|PARM_IS_ALIGN_HI)
#define PARM_BYTE	(PARM_IS_BYTE)
#define PARM_SIGN_LO	(PARM_IS_BYTE|PARM_IS_ALIGN_LO|PARM_IS_SIGNED)
#define PARM_SIGN_HI	(PARM_IS_BYTE|PARM_IS_ALIGN_HI|PARM_IS_SIGNED)

/* One row per EMUX_FX_* effect id, in effect-id order. */
static struct emux_parm_defs {
	int type;	/* byte or word */
	int low, high;	/* value range */
	long offset;	/* offset in parameter record (-1 = not written) */
	int update;	/* flgas for real-time update */
} parm_defs[EMUX_NUM_EFFECTS] = {
	{PARM_WORD, 0, 0x8000, parm_offset(moddelay), 0},	/* env1 delay */
	{PARM_BYTE_LO, 1, 0x80, parm_offset(modatkhld), 0},	/* env1 attack */
	{PARM_BYTE_HI, 0, 0x7e, parm_offset(modatkhld), 0},	/* env1 hold */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(moddcysus), 0},	/* env1 decay */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(modrelease), 0},	/* env1 release */
	{PARM_BYTE_HI, 0, 0x7f, parm_offset(moddcysus), 0},	/* env1 sustain */
	{PARM_BYTE_HI, 0, 0xff, parm_offset(pefe), 0},	/* env1 pitch */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(pefe), 0},	/* env1 fc */
	{PARM_WORD, 0, 0x8000, parm_offset(voldelay), 0},	/* env2 delay */
	{PARM_BYTE_LO, 1, 0x80, parm_offset(volatkhld), 0},	/* env2 attack */
	{PARM_BYTE_HI, 0, 0x7e, parm_offset(volatkhld), 0},	/* env2 hold */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(voldcysus), 0},	/* env2 decay */
	{PARM_BYTE_LO, 1, 0x7f, parm_offset(volrelease), 0},	/* env2 release */
	{PARM_BYTE_HI, 0, 0x7f, parm_offset(voldcysus), 0},	/* env2 sustain */
	{PARM_WORD, 0, 0x8000, parm_offset(lfo1delay), 0},	/* lfo1 delay */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(tremfrq), SNDRV_EMUX_UPDATE_TREMFREQ},	/* lfo1 freq */
	{PARM_SIGN_HI, -128, 127, parm_offset(tremfrq), SNDRV_EMUX_UPDATE_TREMFREQ},	/* lfo1 vol */
	{PARM_SIGN_HI, -128, 127, parm_offset(fmmod), SNDRV_EMUX_UPDATE_FMMOD},	/* lfo1 pitch */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(fmmod), SNDRV_EMUX_UPDATE_FMMOD},	/* lfo1 cutoff */
	{PARM_WORD, 0, 0x8000, parm_offset(lfo2delay), 0},	/* lfo2 delay */
	{PARM_BYTE_LO, 0, 0xff, parm_offset(fm2frq2), SNDRV_EMUX_UPDATE_FM2FRQ2},	/* lfo2 freq */
	{PARM_SIGN_HI, -128, 127, parm_offset(fm2frq2), SNDRV_EMUX_UPDATE_FM2FRQ2},	/* lfo2 pitch */
	{PARM_WORD, 0, 0xffff, -1, SNDRV_EMUX_UPDATE_PITCH},	/* initial pitch */
	{PARM_BYTE, 0, 0xff, parm_offset(chorus), 0},	/* chorus */
	{PARM_BYTE, 0, 0xff, parm_offset(reverb), 0},	/* reverb */
	{PARM_BYTE, 0, 0xff, parm_offset(cutoff), SNDRV_EMUX_UPDATE_VOLUME},	/* cutoff */
	{PARM_BYTE, 0, 15, parm_offset(filterQ), SNDRV_EMUX_UPDATE_Q},	/* resonance */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* sample start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* loop start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* loop end */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse sample start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse loop start */
	{PARM_WORD, 0, 0xffff, -1, 0},	/* coarse loop end */
	{PARM_BYTE, 0, 0xff, -1, SNDRV_EMUX_UPDATE_VOLUME},	/* initial attenuation */
};
/* set byte effect value
 *
 * Apply the channel's stored effect for @type onto the byte at @valp:
 * replaces it in SET mode, adds to the original (signed or unsigned
 * per parm_defs) in ADD mode, then clamps to the parameter's range.
 */
static void
effect_set_byte(unsigned char *valp, struct snd_midi_channel *chan, int type)
{
	short effect;
	struct snd_emux_effect_table *fx = chan->private;

	effect = fx->val[type];
	if (fx->flag[type] == EMUX_FX_FLAG_ADD) {
		/* relative mode: accumulate on top of the original value */
		if (parm_defs[type].type & PARM_IS_SIGNED)
			effect += *(char*)valp;
		else
			effect += *valp;
	}
	/* clamp into the legal range for this parameter */
	if (effect < parm_defs[type].low)
		effect = parm_defs[type].low;
	else if (effect > parm_defs[type].high)
		effect = parm_defs[type].high;
	*valp = (unsigned char)effect;
}
/* set word effect value
 *
 * 16-bit counterpart of effect_set_byte(): the stored effect value is
 * reinterpreted as unsigned short, optionally added to the original,
 * and clamped to the parameter's range before being written back.
 */
static void
effect_set_word(unsigned short *valp, struct snd_midi_channel *chan, int type)
{
	int effect;
	struct snd_emux_effect_table *fx = chan->private;

	effect = *(unsigned short*)&fx->val[type];
	if (fx->flag[type] == EMUX_FX_FLAG_ADD)
		effect += *valp;
	if (effect < parm_defs[type].low)
		effect = parm_defs[type].low;
	else if (effect > parm_defs[type].high)
		effect = parm_defs[type].high;
	*valp = (unsigned short)effect;
}
/* address offset
 *
 * Combine the coarse (@hi, units of 32768) and fine (@lo) sample
 * address effects into one offset. For 16-bit samples the byte offset
 * is halved to get a frame offset.
 */
static int
effect_get_offset(struct snd_midi_channel *chan, int lo, int hi, int mode)
{
	int addr = 0;
	struct snd_emux_effect_table *fx = chan->private;

	if (fx->flag[hi])
		addr = (short)fx->val[hi];
	addr = addr << 15;
	if (fx->flag[lo])
		addr += (short)fx->val[lo];
	if (!(mode & SNDRV_SFNT_SAMPLE_8BITS))
		addr /= 2;
	return addr;
}
#ifdef CONFIG_SND_SEQUENCER_OSS
/* change effects - for OSS sequencer compatibility
 *
 * OSS encodes the application mode in the high bits of @type:
 * bit 6 = turn the effect off, bit 7 = apply additively, otherwise
 * set absolutely. Decode and forward to snd_emux_send_effect().
 */
void
snd_emux_send_effect_oss(struct snd_emux_port *port,
			 struct snd_midi_channel *chan, int type, int val)
{
	int mode;

	if (type & 0x40)
		mode = EMUX_FX_FLAG_OFF;
	else if (type & 0x80)
		mode = EMUX_FX_FLAG_ADD;
	else
		mode = EMUX_FX_FLAG_SET;
	type &= 0x3f;	/* strip the mode bits to get the effect id */

	snd_emux_send_effect(port, chan, type, val, mode);
}
#endif
/* Modify the effect value.
 * if update is necessary, call emu8000_control
 *
 * Stores the new value/mode in the channel's effect table (so that
 * voices started later pick it up via snd_emux_setup_effect()), and —
 * for parameters flagged for real-time update — patches the raw
 * parameter bytes of every currently playing voice on the channel and
 * triggers the hardware update.
 */
void
snd_emux_send_effect(struct snd_emux_port *port, struct snd_midi_channel *chan,
		     int type, int val, int mode)
{
	int i;
	int offset;
	unsigned char *srcp, *origp;
	struct snd_emux *emu;
	struct snd_emux_effect_table *fx;
	unsigned long flags;

	emu = port->emu;
	fx = chan->private;
	if (emu == NULL || fx == NULL)
		return;
	if (type < 0 || type >= EMUX_NUM_EFFECTS)
		return;

	fx->val[type] = val;
	fx->flag[type] = mode;

	/* do we need to modify the register in realtime ? */
	if (! parm_defs[type].update || (offset = parm_defs[type].offset) < 0)
		return;

	/* point at the byte actually affected within the 16-bit field */
#ifdef SNDRV_LITTLE_ENDIAN
	if (parm_defs[type].type & PARM_IS_ALIGN_HI)
		offset++;
#else
	if (parm_defs[type].type & PARM_IS_ALIGN_LO)
		offset++;
#endif
	/* modify the register values */
	spin_lock_irqsave(&emu->voice_lock, flags);
	for (i = 0; i < emu->max_voices; i++) {
		struct snd_emux_voice *vp = &emu->voices[i];
		if (!STATE_IS_PLAYING(vp->state) || vp->chan != chan)
			continue;
		srcp = (unsigned char*)&vp->reg.parm + offset;
		origp = (unsigned char*)&vp->zone->v.parm + offset;
		/*
		 * Byte vs. word is a property of the effect "type", not of
		 * the voice index: the original code indexed parm_defs[]
		 * with the voice counter 'i', selecting the wrong entry and
		 * reading past the table once max_voices exceeds
		 * EMUX_NUM_EFFECTS.
		 */
		if (parm_defs[type].type & PARM_IS_BYTE) {
			*srcp = *origp;
			effect_set_byte(srcp, chan, type);
		} else {
			*(unsigned short*)srcp = *(unsigned short*)origp;
			effect_set_word((unsigned short*)srcp, chan, type);
		}
	}
	spin_unlock_irqrestore(&emu->voice_lock, flags);

	/* activate them */
	snd_emux_update_channel(port, chan, parm_defs[type].update);
}
/* copy wavetable registers to voice table
 *
 * Apply every active channel effect to a freshly set-up voice: patch
 * the affected parameter bytes/words in vp->reg.parm (here the loop
 * index i IS the effect type), then adjust the sample start and loop
 * points by the combined coarse/fine address effects.
 */
void
snd_emux_setup_effect(struct snd_emux_voice *vp)
{
	struct snd_midi_channel *chan = vp->chan;
	struct snd_emux_effect_table *fx;
	unsigned char *srcp;
	int i;

	if (! (fx = chan->private))
		return;

	/* modify the register values via effect table */
	for (i = 0; i < EMUX_FX_END; i++) {
		int offset;
		if (! fx->flag[i] || (offset = parm_defs[i].offset) < 0)
			continue;
		/* pick the byte actually affected within the 16-bit field */
#ifdef SNDRV_LITTLE_ENDIAN
		if (parm_defs[i].type & PARM_IS_ALIGN_HI)
			offset++;
#else
		if (parm_defs[i].type & PARM_IS_ALIGN_LO)
			offset++;
#endif
		srcp = (unsigned char*)&vp->reg.parm + offset;
		if (parm_defs[i].type & PARM_IS_BYTE)
			effect_set_byte(srcp, chan, i);
		else
			effect_set_word((unsigned short*)srcp, chan, i);
	}

	/* correct sample and loop points */
	vp->reg.start += effect_get_offset(chan, EMUX_FX_SAMPLE_START,
					   EMUX_FX_COARSE_SAMPLE_START,
					   vp->reg.sample_mode);

	vp->reg.loopstart += effect_get_offset(chan, EMUX_FX_LOOP_START,
					       EMUX_FX_COARSE_LOOP_START,
					       vp->reg.sample_mode);

	vp->reg.loopend += effect_get_offset(chan, EMUX_FX_LOOP_END,
					     EMUX_FX_COARSE_LOOP_END,
					     vp->reg.sample_mode);
}
/*
* effect table
*/
void
snd_emux_create_effect(struct snd_emux_port *p)
{
	int ch;

	/* One effect table per MIDI channel.  If the allocation fails,
	 * every channel's private pointer is cleared and the port simply
	 * runs without per-channel effect state. */
	p->effect = kcalloc(p->chset.max_channels,
			    sizeof(struct snd_emux_effect_table), GFP_KERNEL);
	for (ch = 0; ch < p->chset.max_channels; ch++)
		p->chset.channels[ch].private =
			p->effect ? p->effect + ch : NULL;
}
void
snd_emux_delete_effect(struct snd_emux_port *p)
{
	/* Release the per-channel effect tables (kfree(NULL) is a no-op)
	 * and drop the dangling pointer. */
	kfree(p->effect);
	p->effect = NULL;
}
void
snd_emux_clear_effect(struct snd_emux_port *p)
{
	/* Reset every channel's effect parameters to all-zero defaults. */
	if (!p->effect)
		return;
	memset(p->effect, 0,
	       sizeof(struct snd_emux_effect_table) * p->chset.max_channels);
}
#endif /* SNDRV_EMUX_USE_RAW_EFFECT */
| gpl-2.0 |
gwindlord/android_kernel_lenovo_b8000 | sound/core/oss/copy.c | 15514 | 2883 | /*
* Linear conversion Plug-In
* Copyright (c) 2000 by Abramo Bagnara <abramo@alsa-project.org>
*
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "pcm_plugin.h"
/*
 * Transfer @frames frames from the source channel areas to the
 * destination channel areas without any format conversion.
 *
 * Disabled source channels are silenced on the destination (when the
 * destination wants data) and marked disabled; enabled channels are
 * copied verbatim.
 *
 * Returns the number of frames transferred, or a negative error code.
 */
static snd_pcm_sframes_t copy_transfer(struct snd_pcm_plugin *plugin,
			     const struct snd_pcm_plugin_channel *src_channels,
			     struct snd_pcm_plugin_channel *dst_channels,
			     snd_pcm_uframes_t frames)
{
	unsigned int channel;
	unsigned int nchannels;

	if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
		return -ENXIO;
	if (frames == 0)
		return 0;
	nchannels = plugin->src_format.channels;
	for (channel = 0; channel < nchannels; channel++) {
		/* Index the channel arrays directly.  The original walked
		 * raw pointers but the "continue" taken for a disabled
		 * channel skipped the increments, so every channel after a
		 * disabled one was processed against stale channel areas. */
		const struct snd_pcm_plugin_channel *src = &src_channels[channel];
		struct snd_pcm_plugin_channel *dst = &dst_channels[channel];

		/* Channel areas must be byte-aligned. */
		if (snd_BUG_ON(src->area.first % 8 || src->area.step % 8))
			return -ENXIO;
		if (snd_BUG_ON(dst->area.first % 8 || dst->area.step % 8))
			return -ENXIO;
		if (!src->enabled) {
			if (dst->wanted)
				snd_pcm_area_silence(&dst->area, 0, frames,
						     plugin->dst_format.format);
			dst->enabled = 0;
			continue;
		}
		dst->enabled = 1;
		snd_pcm_area_copy(&src->area, 0, &dst->area, 0, frames,
				  plugin->src_format.format);
	}
	return frames;
}
/*
 * Build a "copy" plugin instance for @plug.  Succeeds only when source
 * and destination formats are identical in sample format, rate and
 * channel count; on success *r_plugin holds the new plugin.
 */
int snd_pcm_plugin_build_copy(struct snd_pcm_substream *plug,
			      struct snd_pcm_plugin_format *src_format,
			      struct snd_pcm_plugin_format *dst_format,
			      struct snd_pcm_plugin **r_plugin)
{
	struct snd_pcm_plugin *copy;
	int width;
	int err;

	if (snd_BUG_ON(!r_plugin))
		return -ENXIO;
	*r_plugin = NULL;

	/* Plain copying only makes sense between identical formats. */
	if (snd_BUG_ON(src_format->format != dst_format->format))
		return -ENXIO;
	if (snd_BUG_ON(src_format->rate != dst_format->rate))
		return -ENXIO;
	if (snd_BUG_ON(src_format->channels != dst_format->channels))
		return -ENXIO;

	width = snd_pcm_format_physical_width(src_format->format);
	if (snd_BUG_ON(width <= 0))
		return -ENXIO;

	err = snd_pcm_plugin_build(plug, "copy", src_format, dst_format,
				   0, &copy);
	if (err < 0)
		return err;
	copy->transfer = copy_transfer;
	*r_plugin = copy;
	return 0;
}
| gpl-2.0 |
crystax/android-toolchain-gcc-4-9 | gcc/testsuite/gfortran.dg/io_constraints_2.f90 | 155 | 2703 | ! { dg-do compile }
! { dg-options "-std=f95" }
! Part II of the test of the IO constraints patch, which fixes PRs:
! PRs 25053, 25063, 25064, 25066, 25067, 25068, 25069, 25307 and 20862.
! Modified2006-07-08 to check the patch for PR20844.
!
! Contributed by Paul Thomas <pault@gcc.gnu.org>
!
! Module shared with the main program below; its namelist NL is
! extended there via USE association, which the test expects to be
! diagnosed.
module global
integer :: modvar
namelist /NL/ modvar
contains
! Writes its argument with a Hollerith edit descriptor, which
! -std=f95 flags (see the dg-warning on the FORMAT line).
subroutine foo (i)
integer :: i
write (*, 100) i
100 format (1h , "i=", i6) ! { dg-warning "H format specifier" }
end subroutine foo
end module global
! Main program: each statement below deliberately violates an I/O
! constraint; the trailing dg-error/dg-warning comments are the
! DejaGnu patterns the compiler diagnostics must match.  Do not edit
! those comment patterns or the statements they annotate.
use global
integer :: a,b, c(20)
integer(8) :: ierr
character(80) :: buffer(3)
! Appending to a USE associated namelist is an extension.
NAMELIST /NL/ a,b ! { dg-error "already is USE associated" }
a=1 ; b=2
write(*, NML=NL) z ! { dg-error "followed by IO-list" }
!Was correctly picked up before patch.
print NL, z ! { dg-error "PRINT namelist at \\(1\\) is an extension" }
!
! Not allowed with internal unit
!Was correctly picked up before patch.
write(buffer, NML=NL) ! { dg-error "Internal file at \\(1\\) with namelist" }
!Was correctly picked up before patch.
write(buffer, fmt='(i6)', REC=10) a ! { dg-error "REC tag" }
write(buffer, fmt='(i6)', END=10) a ! { dg-error "END tag" }
! Not allowed with REC= specifier
!Was correctly picked up before patch.
read(10, REC=10, END=100) ! { dg-error "END tag is not allowed" }
write(*, *, REC=10) ! { dg-error "FMT=" }
! Not allowed with an ADVANCE=specifier
READ(buffer, fmt='(i6)', advance='YES') a ! { dg-error "internal file" }
READ(1, NML=NL, advance='YES') ! { dg-error "NAMELIST IO is not allowed" }
READ(1, fmt='(i6)', advance='NO', size = ierr) ! { dg-error "requires default INTEGER" }
READ(1, advance='YES') ! { dg-error "must appear with an explicit format" }
! SIZE= and EOR= are only valid for non-advancing READ.
write(1, fmt='(i6)', advance='YES', size = c(1)) a ! { dg-error "output" }
write(1, fmt='(i6)', advance='YES', eor = 100) a ! { dg-error "output" }
read(1, fmt='(i6)', advance='YES', size = c(1)) a ! { dg-error "ADVANCE = 'NO'" }
read(1, fmt='(i6)', advance='YES', eor = 100) a ! { dg-error "ADVANCE = 'NO'" }
READ(1, fmt='(i6)', advance='NO', size = buffer) a ! { dg-error "INTEGER" }
!Was correctly picked up before patch. -correct syntax error
READ(1, fmt='(i6)', advance='YES', size = 10) a ! { dg-error "Invalid value for SIZE specification" }
READ(1, fmt='(i6)', advance='MAYBE') ! { dg-error "YES or NO" }
! Jump targets for the END=/EOR= specifiers above.
100 continue
200 format (2i6)
END
| gpl-2.0 |
peter-65/rpi-linux | arch/arm/mach-imx/clk-imx51-imx53.c | 411 | 32212 | /*
* Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <dt-bindings/clock/imx5-clock.h>
#include "clk.h"
#include "common.h"
#include "hardware.h"
/* Physical base addresses of the DPLL register blocks (three PLLs on
 * i.MX51, four on i.MX53). */
#define MX51_DPLL1_BASE 0x83f80000
#define MX51_DPLL2_BASE 0x83f84000
#define MX51_DPLL3_BASE 0x83f88000
#define MX53_DPLL1_BASE 0x63f80000
#define MX53_DPLL2_BASE 0x63f84000
#define MX53_DPLL3_BASE 0x63f88000
#define MX53_DPLL4_BASE 0x63f8c000

/* CCM (Clock Controller Module) register offsets, relative to the
 * ioremapped ccm_base passed into the init functions below. */
#define MXC_CCM_CCR (ccm_base + 0x00)
#define MXC_CCM_CCDR (ccm_base + 0x04)
#define MXC_CCM_CSR (ccm_base + 0x08)
#define MXC_CCM_CCSR (ccm_base + 0x0c)
#define MXC_CCM_CACRR (ccm_base + 0x10)
#define MXC_CCM_CBCDR (ccm_base + 0x14)
#define MXC_CCM_CBCMR (ccm_base + 0x18)
#define MXC_CCM_CSCMR1 (ccm_base + 0x1c)
#define MXC_CCM_CSCMR2 (ccm_base + 0x20)
#define MXC_CCM_CSCDR1 (ccm_base + 0x24)
#define MXC_CCM_CS1CDR (ccm_base + 0x28)
#define MXC_CCM_CS2CDR (ccm_base + 0x2c)
#define MXC_CCM_CDCDR (ccm_base + 0x30)
#define MXC_CCM_CHSCDR (ccm_base + 0x34)
#define MXC_CCM_CSCDR2 (ccm_base + 0x38)
#define MXC_CCM_CSCDR3 (ccm_base + 0x3c)
#define MXC_CCM_CSCDR4 (ccm_base + 0x40)
#define MXC_CCM_CWDR (ccm_base + 0x44)
#define MXC_CCM_CDHIPR (ccm_base + 0x48)
#define MXC_CCM_CDCR (ccm_base + 0x4c)
#define MXC_CCM_CTOR (ccm_base + 0x50)
#define MXC_CCM_CLPCR (ccm_base + 0x54)
#define MXC_CCM_CISR (ccm_base + 0x58)
#define MXC_CCM_CIMR (ccm_base + 0x5c)
#define MXC_CCM_CCOSR (ccm_base + 0x60)
#define MXC_CCM_CGPR (ccm_base + 0x64)
#define MXC_CCM_CCGR0 (ccm_base + 0x68)
#define MXC_CCM_CCGR1 (ccm_base + 0x6c)
#define MXC_CCM_CCGR2 (ccm_base + 0x70)
#define MXC_CCM_CCGR3 (ccm_base + 0x74)
#define MXC_CCM_CCGR4 (ccm_base + 0x78)
#define MXC_CCM_CCGR5 (ccm_base + 0x7c)
#define MXC_CCM_CCGR6 (ccm_base + 0x80)
#define MXC_CCM_CCGR7 (ccm_base + 0x84)
/*
 * Parent-name tables for the CCM clock muxes.  Each string must match
 * the name of a clock registered elsewhere in this file, otherwise the
 * clk framework cannot resolve that parent.
 */
/* Low-power Audio Playback Mode clock */
static const char *lp_apm_sel[] = { "osc", };
/* This is used multiple times */
static const char *standard_pll_sel[] = { "pll1_sw", "pll2_sw", "pll3_sw", "lp_apm", };
static const char *periph_apm_sel[] = { "pll1_sw", "pll3_sw", "lp_apm", };
static const char *main_bus_sel[] = { "pll2_sw", "periph_apm", };
static const char *per_lp_apm_sel[] = { "main_bus", "lp_apm", };
static const char *per_root_sel[] = { "per_podf", "ipg", };
static const char *esdhc_c_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
static const char *esdhc_d_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
/* Fix: parent 1 was misspelled "lp_amp"; the registered clock is
 * "lp_apm" (see the imx_clk_mux("lp_apm", ...) registrations below),
 * so the typo left that mux input without a resolvable parent. */
static const char *ssi_apm_sels[] = { "ckih1", "lp_apm", "ckih2", };
static const char *ssi_clk_sels[] = { "pll1_sw", "pll2_sw", "pll3_sw", "ssi_apm", };
static const char *ssi3_clk_sels[] = { "ssi1_root_gate", "ssi2_root_gate", };
static const char *ssi_ext1_com_sels[] = { "ssi_ext1_podf", "ssi1_root_gate", };
static const char *ssi_ext2_com_sels[] = { "ssi_ext2_podf", "ssi2_root_gate", };
static const char *emi_slow_sel[] = { "main_bus", "ahb", };
static const char *usb_phy_sel_str[] = { "osc", "usb_phy_podf", };
static const char *mx51_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "tve_di", };
static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0_gate", };
static const char *mx53_ldb_di0_sel[] = { "pll3_sw", "pll4_sw", };
static const char *mx51_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", };
static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1_gate", };
static const char *mx53_ldb_di1_sel[] = { "pll3_sw", "pll4_sw", };
static const char *mx51_tve_ext_sel[] = { "osc", "ckih1", };
static const char *mx53_tve_ext_sel[] = { "pll4_sw", "ckih1", };
static const char *mx51_tve_sel[] = { "tve_pred", "tve_ext_sel", };
static const char *ipu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
static const char *gpu3d_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb" };
static const char *gpu2d_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb" };
static const char *vpu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
static const char *mx53_can_sel[] = { "ipg", "ckih1", "ckih2", "lp_apm", };
static const char *mx53_cko1_sel[] = {
	"cpu_podf", "pll1_sw", "pll2_sw", "pll3_sw",
	"emi_slow_podf", "pll4_sw", "nfc_podf", "dummy",
	"di_pred", "dummy", "dummy", "ahb",
	"ipg", "per_root", "ckil", "dummy",};
static const char *mx53_cko2_sel[] = {
	"dummy"/* dptc_core */, "dummy"/* dptc_perich */,
	"dummy", "esdhc_a_podf",
	"usboh3_podf", "dummy"/* wrck_clk_root */,
	"ecspi_podf", "dummy"/* pll1_ref_clk */,
	"esdhc_b_podf", "dummy"/* ddr_clk_root */,
	"dummy"/* arm_axi_clk_root */, "dummy"/* usb_phy_out */,
	"vpu_sel", "ipu_sel",
	"osc", "ckih1",
	"dummy", "esdhc_c_sel",
	"ssi1_root_podf", "ssi2_root_podf",
	"dummy", "dummy",
	"dummy"/* lpsr_clk_root */, "dummy"/* pgc_clk_root */,
	"dummy"/* tve_out */, "usb_phy_sel",
	"tve_sel", "lp_apm",
	"uart_root", "dummy"/* spdif0_clk_root */,
	"dummy", "dummy", };
static const char *mx51_spdif_xtal_sel[] = { "osc", "ckih", "ckih2", };
static const char *mx53_spdif_xtal_sel[] = { "osc", "ckih", "ckih2", "pll4_sw", };
static const char *spdif_sel[] = { "pll1_sw", "pll2_sw", "pll3_sw", "spdif_xtal_sel", };
static const char *spdif0_com_sel[] = { "spdif0_podf", "ssi1_root_gate", };
static const char *mx51_spdif1_com_sel[] = { "spdif1_podf", "ssi2_root_gate", };

/* Global clock array registered with the DT clock provider. */
static struct clk *clk[IMX5_CLK_END];
static struct clk_onecell_data clk_data;
/*
 * Register the clocks common to i.MX50/51/53 — fixed sources, the main
 * bus/AHB/IPG dividers, peripheral muxes/dividers and the CCGRx gates —
 * then apply shared default parents/rates and keep a set of essential
 * clocks permanently enabled.  The PLL clocks (pll1_sw..pll4_sw) must
 * already be registered by the SoC-specific caller.
 */
static void __init mx5_clocks_common_init(void __iomem *ccm_base)
{
	imx5_pm_set_ccm_base(ccm_base);

	/* Fixed-rate root sources (rates come from DT via
	 * imx_obtain_fixed_clock). */
	clk[IMX5_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
	clk[IMX5_CLK_CKIL] = imx_obtain_fixed_clock("ckil", 0);
	clk[IMX5_CLK_OSC] = imx_obtain_fixed_clock("osc", 0);
	clk[IMX5_CLK_CKIH1] = imx_obtain_fixed_clock("ckih1", 0);
	clk[IMX5_CLK_CKIH2] = imx_obtain_fixed_clock("ckih2", 0);

	/* Bus tree: main bus mux, AHB/IPG/AXI dividers. */
	clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
					       periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
	clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
					     main_bus_sel, ARRAY_SIZE(main_bus_sel));
	clk[IMX5_CLK_PER_LP_APM] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
					       per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
	clk[IMX5_CLK_PER_PRED1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
	clk[IMX5_CLK_PER_PRED2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
	clk[IMX5_CLK_PER_PODF] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
	clk[IMX5_CLK_PER_ROOT] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
					     per_root_sel, ARRAY_SIZE(per_root_sel));
	clk[IMX5_CLK_AHB] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
	clk[IMX5_CLK_AHB_MAX] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
	clk[IMX5_CLK_AIPS_TZ1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
	clk[IMX5_CLK_AIPS_TZ2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
	clk[IMX5_CLK_TMAX1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
	clk[IMX5_CLK_TMAX2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
	clk[IMX5_CLK_TMAX3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
	clk[IMX5_CLK_SPBA] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
	clk[IMX5_CLK_IPG] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
	clk[IMX5_CLK_AXI_A] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
	clk[IMX5_CLK_AXI_B] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);

	/* Peripheral clock muxes and pred/podf divider chains. */
	clk[IMX5_CLK_UART_SEL] = imx_clk_mux("uart_sel", MXC_CCM_CSCMR1, 24, 2,
					     standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[IMX5_CLK_UART_PRED] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
	clk[IMX5_CLK_UART_ROOT] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);

	clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
						standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
						standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[IMX5_CLK_ESDHC_A_PRED] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
	clk[IMX5_CLK_ESDHC_A_PODF] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
	clk[IMX5_CLK_ESDHC_B_PRED] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
	clk[IMX5_CLK_ESDHC_B_PODF] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
	clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
	clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));

	clk[IMX5_CLK_EMI_SEL] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
					    emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
	clk[IMX5_CLK_EMI_SLOW_PODF] = imx_clk_divider("emi_slow_podf", "emi_sel", MXC_CCM_CBCDR, 22, 3);
	clk[IMX5_CLK_NFC_PODF] = imx_clk_divider("nfc_podf", "emi_slow_podf", MXC_CCM_CBCDR, 13, 3);
	clk[IMX5_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", MXC_CCM_CSCMR1, 4, 2,
					      standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[IMX5_CLK_ECSPI_PRED] = imx_clk_divider("ecspi_pred", "ecspi_sel", MXC_CCM_CSCDR2, 25, 3);
	clk[IMX5_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_pred", MXC_CCM_CSCDR2, 19, 6);
	clk[IMX5_CLK_USBOH3_SEL] = imx_clk_mux("usboh3_sel", MXC_CCM_CSCMR1, 22, 2,
					       standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[IMX5_CLK_USBOH3_PRED] = imx_clk_divider("usboh3_pred", "usboh3_sel", MXC_CCM_CSCDR1, 8, 3);
	clk[IMX5_CLK_USBOH3_PODF] = imx_clk_divider("usboh3_podf", "usboh3_pred", MXC_CCM_CSCDR1, 6, 2);
	clk[IMX5_CLK_USB_PHY_PRED] = imx_clk_divider("usb_phy_pred", "pll3_sw", MXC_CCM_CDCDR, 3, 3);
	clk[IMX5_CLK_USB_PHY_PODF] = imx_clk_divider("usb_phy_podf", "usb_phy_pred", MXC_CCM_CDCDR, 0, 3);
	clk[IMX5_CLK_USB_PHY_SEL] = imx_clk_mux("usb_phy_sel", MXC_CCM_CSCMR1, 26, 1,
						usb_phy_sel_str, ARRAY_SIZE(usb_phy_sel_str));
	clk[IMX5_CLK_CPU_PODF] = imx_clk_divider("cpu_podf", "pll1_sw", MXC_CCM_CACRR, 0, 3);
	clk[IMX5_CLK_DI_PRED] = imx_clk_divider("di_pred", "pll3_sw", MXC_CCM_CDCDR, 6, 3);

	/* Per-peripheral CCGRx gates. */
	clk[IMX5_CLK_IIM_GATE] = imx_clk_gate2("iim_gate", "ipg", MXC_CCM_CCGR0, 30);
	clk[IMX5_CLK_UART1_IPG_GATE] = imx_clk_gate2("uart1_ipg_gate", "ipg", MXC_CCM_CCGR1, 6);
	clk[IMX5_CLK_UART1_PER_GATE] = imx_clk_gate2("uart1_per_gate", "uart_root", MXC_CCM_CCGR1, 8);
	clk[IMX5_CLK_UART2_IPG_GATE] = imx_clk_gate2("uart2_ipg_gate", "ipg", MXC_CCM_CCGR1, 10);
	clk[IMX5_CLK_UART2_PER_GATE] = imx_clk_gate2("uart2_per_gate", "uart_root", MXC_CCM_CCGR1, 12);
	clk[IMX5_CLK_UART3_IPG_GATE] = imx_clk_gate2("uart3_ipg_gate", "ipg", MXC_CCM_CCGR1, 14);
	clk[IMX5_CLK_UART3_PER_GATE] = imx_clk_gate2("uart3_per_gate", "uart_root", MXC_CCM_CCGR1, 16);
	clk[IMX5_CLK_I2C1_GATE] = imx_clk_gate2("i2c1_gate", "per_root", MXC_CCM_CCGR1, 18);
	clk[IMX5_CLK_I2C2_GATE] = imx_clk_gate2("i2c2_gate", "per_root", MXC_CCM_CCGR1, 20);
	clk[IMX5_CLK_PWM1_IPG_GATE] = imx_clk_gate2("pwm1_ipg_gate", "ipg", MXC_CCM_CCGR2, 10);
	clk[IMX5_CLK_PWM1_HF_GATE] = imx_clk_gate2("pwm1_hf_gate", "per_root", MXC_CCM_CCGR2, 12);
	clk[IMX5_CLK_PWM2_IPG_GATE] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
	clk[IMX5_CLK_PWM2_HF_GATE] = imx_clk_gate2("pwm2_hf_gate", "per_root", MXC_CCM_CCGR2, 16);
	clk[IMX5_CLK_GPT_IPG_GATE] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 18);
	clk[IMX5_CLK_GPT_HF_GATE] = imx_clk_gate2("gpt_hf_gate", "per_root", MXC_CCM_CCGR2, 20);
	clk[IMX5_CLK_FEC_GATE] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
	clk[IMX5_CLK_USBOH3_GATE] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
	clk[IMX5_CLK_USBOH3_PER_GATE] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
	clk[IMX5_CLK_ESDHC1_IPG_GATE] = imx_clk_gate2("esdhc1_ipg_gate", "ipg", MXC_CCM_CCGR3, 0);
	clk[IMX5_CLK_ESDHC2_IPG_GATE] = imx_clk_gate2("esdhc2_ipg_gate", "ipg", MXC_CCM_CCGR3, 4);
	clk[IMX5_CLK_ESDHC3_IPG_GATE] = imx_clk_gate2("esdhc3_ipg_gate", "ipg", MXC_CCM_CCGR3, 8);
	clk[IMX5_CLK_ESDHC4_IPG_GATE] = imx_clk_gate2("esdhc4_ipg_gate", "ipg", MXC_CCM_CCGR3, 12);
	clk[IMX5_CLK_SSI1_IPG_GATE] = imx_clk_gate2("ssi1_ipg_gate", "ipg", MXC_CCM_CCGR3, 16);
	clk[IMX5_CLK_SSI2_IPG_GATE] = imx_clk_gate2("ssi2_ipg_gate", "ipg", MXC_CCM_CCGR3, 20);
	clk[IMX5_CLK_SSI3_IPG_GATE] = imx_clk_gate2("ssi3_ipg_gate", "ipg", MXC_CCM_CCGR3, 24);
	clk[IMX5_CLK_ECSPI1_IPG_GATE] = imx_clk_gate2("ecspi1_ipg_gate", "ipg", MXC_CCM_CCGR4, 18);
	clk[IMX5_CLK_ECSPI1_PER_GATE] = imx_clk_gate2("ecspi1_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 20);
	clk[IMX5_CLK_ECSPI2_IPG_GATE] = imx_clk_gate2("ecspi2_ipg_gate", "ipg", MXC_CCM_CCGR4, 22);
	clk[IMX5_CLK_ECSPI2_PER_GATE] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
	clk[IMX5_CLK_CSPI_IPG_GATE] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
	clk[IMX5_CLK_SDMA_GATE] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
	clk[IMX5_CLK_EMI_FAST_GATE] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
	clk[IMX5_CLK_EMI_SLOW_GATE] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
	clk[IMX5_CLK_IPU_SEL] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
	clk[IMX5_CLK_IPU_GATE] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
	clk[IMX5_CLK_NFC_GATE] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
	clk[IMX5_CLK_IPU_DI0_GATE] = imx_clk_gate2("ipu_di0_gate", "ipu_di0_sel", MXC_CCM_CCGR6, 10);
	clk[IMX5_CLK_IPU_DI1_GATE] = imx_clk_gate2("ipu_di1_gate", "ipu_di1_sel", MXC_CCM_CCGR6, 12);
	clk[IMX5_CLK_GPU3D_SEL] = imx_clk_mux("gpu3d_sel", MXC_CCM_CBCMR, 4, 2, gpu3d_sel, ARRAY_SIZE(gpu3d_sel));
	clk[IMX5_CLK_GPU2D_SEL] = imx_clk_mux("gpu2d_sel", MXC_CCM_CBCMR, 16, 2, gpu2d_sel, ARRAY_SIZE(gpu2d_sel));
	clk[IMX5_CLK_GPU3D_GATE] = imx_clk_gate2("gpu3d_gate", "gpu3d_sel", MXC_CCM_CCGR5, 2);
	clk[IMX5_CLK_GARB_GATE] = imx_clk_gate2("garb_gate", "axi_a", MXC_CCM_CCGR5, 4);
	clk[IMX5_CLK_GPU2D_GATE] = imx_clk_gate2("gpu2d_gate", "gpu2d_sel", MXC_CCM_CCGR6, 14);
	clk[IMX5_CLK_VPU_SEL] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
	clk[IMX5_CLK_VPU_GATE] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
	clk[IMX5_CLK_VPU_REFERENCE_GATE] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
	clk[IMX5_CLK_UART4_IPG_GATE] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
	clk[IMX5_CLK_UART4_PER_GATE] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
	clk[IMX5_CLK_UART5_IPG_GATE] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
	clk[IMX5_CLK_UART5_PER_GATE] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
	clk[IMX5_CLK_GPC_DVFS] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);

	/* SSI / audio clock tree. */
	clk[IMX5_CLK_SSI_APM] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
	clk[IMX5_CLK_SSI1_ROOT_SEL] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
	clk[IMX5_CLK_SSI2_ROOT_SEL] = imx_clk_mux("ssi2_root_sel", MXC_CCM_CSCMR1, 12, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
	clk[IMX5_CLK_SSI3_ROOT_SEL] = imx_clk_mux("ssi3_root_sel", MXC_CCM_CSCMR1, 11, 1, ssi3_clk_sels, ARRAY_SIZE(ssi3_clk_sels));
	clk[IMX5_CLK_SSI_EXT1_SEL] = imx_clk_mux("ssi_ext1_sel", MXC_CCM_CSCMR1, 28, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
	clk[IMX5_CLK_SSI_EXT2_SEL] = imx_clk_mux("ssi_ext2_sel", MXC_CCM_CSCMR1, 30, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
	clk[IMX5_CLK_SSI_EXT1_COM_SEL] = imx_clk_mux("ssi_ext1_com_sel", MXC_CCM_CSCMR1, 0, 1, ssi_ext1_com_sels, ARRAY_SIZE(ssi_ext1_com_sels));
	clk[IMX5_CLK_SSI_EXT2_COM_SEL] = imx_clk_mux("ssi_ext2_com_sel", MXC_CCM_CSCMR1, 1, 1, ssi_ext2_com_sels, ARRAY_SIZE(ssi_ext2_com_sels));
	clk[IMX5_CLK_SSI1_ROOT_PRED] = imx_clk_divider("ssi1_root_pred", "ssi1_root_sel", MXC_CCM_CS1CDR, 6, 3);
	clk[IMX5_CLK_SSI1_ROOT_PODF] = imx_clk_divider("ssi1_root_podf", "ssi1_root_pred", MXC_CCM_CS1CDR, 0, 6);
	clk[IMX5_CLK_SSI2_ROOT_PRED] = imx_clk_divider("ssi2_root_pred", "ssi2_root_sel", MXC_CCM_CS2CDR, 6, 3);
	clk[IMX5_CLK_SSI2_ROOT_PODF] = imx_clk_divider("ssi2_root_podf", "ssi2_root_pred", MXC_CCM_CS2CDR, 0, 6);
	clk[IMX5_CLK_SSI_EXT1_PRED] = imx_clk_divider("ssi_ext1_pred", "ssi_ext1_sel", MXC_CCM_CS1CDR, 22, 3);
	clk[IMX5_CLK_SSI_EXT1_PODF] = imx_clk_divider("ssi_ext1_podf", "ssi_ext1_pred", MXC_CCM_CS1CDR, 16, 6);
	clk[IMX5_CLK_SSI_EXT2_PRED] = imx_clk_divider("ssi_ext2_pred", "ssi_ext2_sel", MXC_CCM_CS2CDR, 22, 3);
	clk[IMX5_CLK_SSI_EXT2_PODF] = imx_clk_divider("ssi_ext2_podf", "ssi_ext2_pred", MXC_CCM_CS2CDR, 16, 6);
	clk[IMX5_CLK_SSI1_ROOT_GATE] = imx_clk_gate2("ssi1_root_gate", "ssi1_root_podf", MXC_CCM_CCGR3, 18);
	clk[IMX5_CLK_SSI2_ROOT_GATE] = imx_clk_gate2("ssi2_root_gate", "ssi2_root_podf", MXC_CCM_CCGR3, 22);
	clk[IMX5_CLK_SSI3_ROOT_GATE] = imx_clk_gate2("ssi3_root_gate", "ssi3_root_sel", MXC_CCM_CCGR3, 26);
	clk[IMX5_CLK_SSI_EXT1_GATE] = imx_clk_gate2("ssi_ext1_gate", "ssi_ext1_com_sel", MXC_CCM_CCGR3, 28);
	clk[IMX5_CLK_SSI_EXT2_GATE] = imx_clk_gate2("ssi_ext2_gate", "ssi_ext2_com_sel", MXC_CCM_CCGR3, 30);
	clk[IMX5_CLK_EPIT1_IPG_GATE] = imx_clk_gate2("epit1_ipg_gate", "ipg", MXC_CCM_CCGR2, 2);
	clk[IMX5_CLK_EPIT1_HF_GATE] = imx_clk_gate2("epit1_hf_gate", "per_root", MXC_CCM_CCGR2, 4);
	clk[IMX5_CLK_EPIT2_IPG_GATE] = imx_clk_gate2("epit2_ipg_gate", "ipg", MXC_CCM_CCGR2, 6);
	clk[IMX5_CLK_EPIT2_HF_GATE] = imx_clk_gate2("epit2_hf_gate", "per_root", MXC_CCM_CCGR2, 8);
	clk[IMX5_CLK_OWIRE_GATE] = imx_clk_gate2("owire_gate", "per_root", MXC_CCM_CCGR2, 22);
	clk[IMX5_CLK_SRTC_GATE] = imx_clk_gate2("srtc_gate", "per_root", MXC_CCM_CCGR4, 28);
	clk[IMX5_CLK_PATA_GATE] = imx_clk_gate2("pata_gate", "ipg", MXC_CCM_CCGR4, 0);
	clk[IMX5_CLK_SPDIF0_SEL] = imx_clk_mux("spdif0_sel", MXC_CCM_CSCMR2, 0, 2, spdif_sel, ARRAY_SIZE(spdif_sel));
	clk[IMX5_CLK_SPDIF0_PRED] = imx_clk_divider("spdif0_pred", "spdif0_sel", MXC_CCM_CDCDR, 25, 3);
	clk[IMX5_CLK_SPDIF0_PODF] = imx_clk_divider("spdif0_podf", "spdif0_pred", MXC_CCM_CDCDR, 19, 6);
	clk[IMX5_CLK_SPDIF0_COM_SEL] = imx_clk_mux_flags("spdif0_com_sel", MXC_CCM_CSCMR2, 4, 1,
							 spdif0_com_sel, ARRAY_SIZE(spdif0_com_sel), CLK_SET_RATE_PARENT);
	clk[IMX5_CLK_SPDIF0_GATE] = imx_clk_gate2("spdif0_gate", "spdif0_com_sel", MXC_CCM_CCGR5, 26);
	clk[IMX5_CLK_SPDIF_IPG_GATE] = imx_clk_gate2("spdif_ipg_gate", "ipg", MXC_CCM_CCGR5, 30);
	clk[IMX5_CLK_SAHARA_IPG_GATE] = imx_clk_gate2("sahara_ipg_gate", "ipg", MXC_CCM_CCGR4, 14);
	clk[IMX5_CLK_SATA_REF] = imx_clk_fixed_factor("sata_ref", "usb_phy1_gate", 1, 1);

	clk_register_clkdev(clk[IMX5_CLK_CPU_PODF], NULL, "cpu0");
	clk_register_clkdev(clk[IMX5_CLK_GPC_DVFS], "gpc_dvfs", NULL);

	/* Set SDHC parents to be PLL2 */
	clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
	clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);

	/* move usb phy clk to 24MHz */
	clk_set_parent(clk[IMX5_CLK_USB_PHY_SEL], clk[IMX5_CLK_OSC]);

	/* Keep clocks needed for basic system operation enabled. */
	clk_prepare_enable(clk[IMX5_CLK_GPC_DVFS]);
	clk_prepare_enable(clk[IMX5_CLK_AHB_MAX]); /* esdhc3 */
	clk_prepare_enable(clk[IMX5_CLK_AIPS_TZ1]);
	clk_prepare_enable(clk[IMX5_CLK_AIPS_TZ2]); /* fec */
	clk_prepare_enable(clk[IMX5_CLK_SPBA]);
	clk_prepare_enable(clk[IMX5_CLK_EMI_FAST_GATE]); /* fec */
	clk_prepare_enable(clk[IMX5_CLK_EMI_SLOW_GATE]); /* eim */
	/* NOTE(review): the MIPI gates are registered by mx51_clocks_init
	 * only; on i.MX50/53 these entries look unpopulated here —
	 * TODO confirm. */
	clk_prepare_enable(clk[IMX5_CLK_MIPI_HSC1_GATE]);
	clk_prepare_enable(clk[IMX5_CLK_MIPI_HSC2_GATE]);
	clk_prepare_enable(clk[IMX5_CLK_MIPI_ESC_GATE]);
	clk_prepare_enable(clk[IMX5_CLK_MIPI_HSP_GATE]);
	clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
	clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
	clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
}
/*
 * i.MX50 clock init: registers the three PLLs, runs the common i.MX5
 * setup, adds the i.MX50-specific muxes/gates and exposes the tree as a
 * DT clock provider.  Invoked via CLK_OF_DECLARE for "fsl,imx50-ccm".
 */
static void __init mx50_clocks_init(struct device_node *np)
{
	void __iomem *ccm_base;
	void __iomem *pll_base;
	unsigned long r;

	/* NOTE(review): the i.MX50 PLLs are mapped at the MX53 base
	 * addresses here — presumably the two SoCs share the layout;
	 * confirm against the reference manual. */
	pll_base = ioremap(MX53_DPLL1_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL1_SW] = imx_clk_pllv2("pll1_sw", "osc", pll_base);

	pll_base = ioremap(MX53_DPLL2_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL2_SW] = imx_clk_pllv2("pll2_sw", "osc", pll_base);

	pll_base = ioremap(MX53_DPLL3_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL3_SW] = imx_clk_pllv2("pll3_sw", "osc", pll_base);

	ccm_base = of_iomap(np, 0);
	WARN_ON(!ccm_base);

	mx5_clocks_common_init(ccm_base);

	clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 10, 1,
					   lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
	clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
	clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
	clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
	clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
	clk[IMX5_CLK_USB_PHY1_GATE] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
	clk[IMX5_CLK_USB_PHY2_GATE] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
	clk[IMX5_CLK_I2C3_GATE] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);

	/* Clock-output-source (CKO1/CKO2) observation pins. */
	clk[IMX5_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4,
					     mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel));
	clk[IMX5_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", MXC_CCM_CCOSR, 4, 3);
	clk[IMX5_CLK_CKO1] = imx_clk_gate2("cko1", "cko1_podf", MXC_CCM_CCOSR, 7);
	clk[IMX5_CLK_CKO2_SEL] = imx_clk_mux("cko2_sel", MXC_CCM_CCOSR, 16, 5,
					     mx53_cko2_sel, ARRAY_SIZE(mx53_cko2_sel));
	clk[IMX5_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", MXC_CCM_CCOSR, 21, 3);
	clk[IMX5_CLK_CKO2] = imx_clk_gate2("cko2", "cko2_podf", MXC_CCM_CCOSR, 24);

	imx_check_clocks(clk, ARRAY_SIZE(clk));

	clk_data.clks = clk;
	clk_data.clk_num = ARRAY_SIZE(clk);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);

	/* set SDHC root clock to 200MHZ*/
	clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 200000000);
	clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 200000000);

	/* The IIM fuse box is only needed while reading the silicon rev. */
	clk_prepare_enable(clk[IMX5_CLK_IIM_GATE]);
	imx_print_silicon_rev("i.MX50", IMX_CHIP_REVISION_1_1);
	clk_disable_unprepare(clk[IMX5_CLK_IIM_GATE]);

	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
}
CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
/*
 * i.MX51 clock init: registers the three PLLs, runs the common i.MX5
 * setup, adds the i.MX51-specific display/TVE/MIPI/SPDIF clocks and
 * exposes the tree as a DT clock provider.  Invoked via CLK_OF_DECLARE
 * for "fsl,imx51-ccm".
 */
static void __init mx51_clocks_init(struct device_node *np)
{
	void __iomem *ccm_base;
	void __iomem *pll_base;
	u32 val;

	pll_base = ioremap(MX51_DPLL1_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL1_SW] = imx_clk_pllv2("pll1_sw", "osc", pll_base);

	pll_base = ioremap(MX51_DPLL2_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL2_SW] = imx_clk_pllv2("pll2_sw", "osc", pll_base);

	pll_base = ioremap(MX51_DPLL3_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL3_SW] = imx_clk_pllv2("pll3_sw", "osc", pll_base);

	ccm_base = of_iomap(np, 0);
	WARN_ON(!ccm_base);

	mx5_clocks_common_init(ccm_base);

	clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
					   lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
	/* IPU display interfaces and TV encoder clocks. */
	clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
						mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
	clk[IMX5_CLK_IPU_DI1_SEL] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
						mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
	clk[IMX5_CLK_TVE_EXT_SEL] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
						      mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel), CLK_SET_RATE_PARENT);
	clk[IMX5_CLK_TVE_SEL] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1,
					    mx51_tve_sel, ARRAY_SIZE(mx51_tve_sel));
	clk[IMX5_CLK_TVE_GATE] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
	clk[IMX5_CLK_TVE_PRED] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
	clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
	clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
	clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
	clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
	clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
	clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
	clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
	clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
	clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
	clk[IMX5_CLK_MIPI_HSP_GATE] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
	clk[IMX5_CLK_SPDIF_XTAL_SEL] = imx_clk_mux("spdif_xtal_sel", MXC_CCM_CSCMR1, 2, 2,
						   mx51_spdif_xtal_sel, ARRAY_SIZE(mx51_spdif_xtal_sel));
	clk[IMX5_CLK_SPDIF1_SEL] = imx_clk_mux("spdif1_sel", MXC_CCM_CSCMR2, 2, 2,
					       spdif_sel, ARRAY_SIZE(spdif_sel));
	clk[IMX5_CLK_SPDIF1_PRED] = imx_clk_divider("spdif1_pred", "spdif1_sel", MXC_CCM_CDCDR, 16, 3);
	clk[IMX5_CLK_SPDIF1_PODF] = imx_clk_divider("spdif1_podf", "spdif1_pred", MXC_CCM_CDCDR, 9, 6);
	clk[IMX5_CLK_SPDIF1_COM_SEL] = imx_clk_mux("spdif1_com_sel", MXC_CCM_CSCMR2, 5, 1,
						   mx51_spdif1_com_sel, ARRAY_SIZE(mx51_spdif1_com_sel));
	clk[IMX5_CLK_SPDIF1_GATE] = imx_clk_gate2("spdif1_gate", "spdif1_com_sel", MXC_CCM_CCGR5, 28);

	imx_check_clocks(clk, ARRAY_SIZE(clk));

	clk_data.clks = clk;
	clk_data.clk_num = ARRAY_SIZE(clk);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);

	/* set the usboh3 parent to pll2_sw */
	clk_set_parent(clk[IMX5_CLK_USBOH3_SEL], clk[IMX5_CLK_PLL2_SW]);

	/* set SDHC root clock to 166.25MHZ*/
	clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 166250000);
	clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 166250000);

	/* The IIM fuse box is only needed while reading the silicon rev. */
	clk_prepare_enable(clk[IMX5_CLK_IIM_GATE]);
	imx_print_silicon_rev("i.MX51", mx51_revision());
	clk_disable_unprepare(clk[IMX5_CLK_IIM_GATE]);

	/*
	 * Reference Manual says: Functionality of CCDR[18] and CLPCR[23] is no
	 * longer supported. Set to one for better power saving.
	 *
	 * The effect of not setting these bits is that MIPI clocks can't be
	 * enabled without the IPU clock being enabled aswell.
	 */
	val = readl(MXC_CCM_CCDR);
	val |= 1 << 18;
	writel(val, MXC_CCM_CCDR);

	val = readl(MXC_CCM_CLPCR);
	val |= 1 << 23;
	writel(val, MXC_CCM_CLPCR);
}
CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
/*
 * i.MX53 clock tree setup: map and register the four DPLLs, register the
 * clocks shared across i.MX5 SoCs, add the i.MX53-specific muxes/dividers/
 * gates, publish the table as a DT clock provider, then apply default
 * rates/parents (SDHC 200MHz, CAN from lp_apm, USBOH3 per-clock).
 */
static void __init mx53_clocks_init(struct device_node *np)
{
	void __iomem *ccm_base;
	void __iomem *pll_base;
	unsigned long r;

	/* each DPLL control block is mapped individually; WARN (not fail) on error */
	pll_base = ioremap(MX53_DPLL1_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL1_SW] = imx_clk_pllv2("pll1_sw", "osc", pll_base);

	pll_base = ioremap(MX53_DPLL2_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL2_SW] = imx_clk_pllv2("pll2_sw", "osc", pll_base);

	pll_base = ioremap(MX53_DPLL3_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL3_SW] = imx_clk_pllv2("pll3_sw", "osc", pll_base);

	pll_base = ioremap(MX53_DPLL4_BASE, SZ_16K);
	WARN_ON(!pll_base);
	clk[IMX5_CLK_PLL4_SW] = imx_clk_pllv2("pll4_sw", "osc", pll_base);

	ccm_base = of_iomap(np, 0);
	WARN_ON(!ccm_base);

	mx5_clocks_common_init(ccm_base);

	/* i.MX53-specific muxes, dividers and gates follow */
	clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 10, 1,
						lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
	clk[IMX5_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
	clk[IMX5_CLK_LDB_DI1_DIV] = imx_clk_divider_flags("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1, 0);
	clk[IMX5_CLK_LDB_DI1_SEL] = imx_clk_mux_flags("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1,
						mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel), CLK_SET_RATE_PARENT);
	clk[IMX5_CLK_DI_PLL4_PODF] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3);
	clk[IMX5_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
	clk[IMX5_CLK_LDB_DI0_DIV] = imx_clk_divider_flags("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1, 0);
	clk[IMX5_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1,
						mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel), CLK_SET_RATE_PARENT);
	clk[IMX5_CLK_LDB_DI0_GATE] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28);
	clk[IMX5_CLK_LDB_DI1_GATE] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30);
	clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
						mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel));
	clk[IMX5_CLK_IPU_DI1_SEL] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
						mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel));
	clk[IMX5_CLK_TVE_EXT_SEL] = imx_clk_mux_flags("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
						mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel), CLK_SET_RATE_PARENT);
	clk[IMX5_CLK_TVE_GATE] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
	clk[IMX5_CLK_TVE_PRED] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
	clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
	clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
	clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
	clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
	clk[IMX5_CLK_USB_PHY1_GATE] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
	clk[IMX5_CLK_USB_PHY2_GATE] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
	clk[IMX5_CLK_CAN_SEL] = imx_clk_mux("can_sel", MXC_CCM_CSCMR2, 6, 2,
						mx53_can_sel, ARRAY_SIZE(mx53_can_sel));
	clk[IMX5_CLK_CAN1_SERIAL_GATE] = imx_clk_gate2("can1_serial_gate", "can_sel", MXC_CCM_CCGR6, 22);
	clk[IMX5_CLK_CAN1_IPG_GATE] = imx_clk_gate2("can1_ipg_gate", "ipg", MXC_CCM_CCGR6, 20);
	clk[IMX5_CLK_OCRAM] = imx_clk_gate2("ocram", "ahb", MXC_CCM_CCGR6, 2);
	clk[IMX5_CLK_CAN2_SERIAL_GATE] = imx_clk_gate2("can2_serial_gate", "can_sel", MXC_CCM_CCGR4, 8);
	clk[IMX5_CLK_CAN2_IPG_GATE] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 6);
	clk[IMX5_CLK_I2C3_GATE] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
	clk[IMX5_CLK_SATA_GATE] = imx_clk_gate2("sata_gate", "ipg", MXC_CCM_CCGR4, 2);
	clk[IMX5_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4,
						mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel));
	clk[IMX5_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", MXC_CCM_CCOSR, 4, 3);
	clk[IMX5_CLK_CKO1] = imx_clk_gate2("cko1", "cko1_podf", MXC_CCM_CCOSR, 7);
	clk[IMX5_CLK_CKO2_SEL] = imx_clk_mux("cko2_sel", MXC_CCM_CCOSR, 16, 5,
						mx53_cko2_sel, ARRAY_SIZE(mx53_cko2_sel));
	clk[IMX5_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", MXC_CCM_CCOSR, 21, 3);
	clk[IMX5_CLK_CKO2] = imx_clk_gate2("cko2", "cko2_podf", MXC_CCM_CCOSR, 24);
	clk[IMX5_CLK_SPDIF_XTAL_SEL] = imx_clk_mux("spdif_xtal_sel", MXC_CCM_CSCMR1, 2, 2,
						mx53_spdif_xtal_sel, ARRAY_SIZE(mx53_spdif_xtal_sel));

	imx_check_clocks(clk, ARRAY_SIZE(clk));

	/* expose the whole table to device tree consumers */
	clk_data.clks = clk;
	clk_data.clk_num = ARRAY_SIZE(clk);
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);

	/* set SDHC root clock to 200MHZ*/
	clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 200000000);
	clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 200000000);

	/* move can bus clk to 24MHz */
	clk_set_parent(clk[IMX5_CLK_CAN_SEL], clk[IMX5_CLK_LP_APM]);

	/* IIM gate is enabled only long enough to read the silicon revision */
	clk_prepare_enable(clk[IMX5_CLK_IIM_GATE]);
	imx_print_silicon_rev("i.MX53", mx53_revision());
	clk_disable_unprepare(clk[IMX5_CLK_IIM_GATE]);

	r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
	clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
}
CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
| gpl-2.0 |
dongsupark/linux | drivers/iio/humidity/si7005.c | 411 | 4259 | /*
* si7005.c - Support for Silabs Si7005 humidity and temperature sensor
*
* Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
* This file is subject to the terms and conditions of version 2 of
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
* (7-bit I2C slave address 0x40)
*
* TODO: heater, fast mode, processed mode (temp. / linearity compensation)
*/
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#define SI7005_STATUS 0x00
#define SI7005_DATA 0x01 /* 16-bit, MSB */
#define SI7005_CONFIG 0x03
#define SI7005_ID 0x11
#define SI7005_STATUS_NRDY BIT(0)
#define SI7005_CONFIG_TEMP BIT(4)
#define SI7005_CONFIG_START BIT(0)
#define SI7005_ID_7005 0x50
#define SI7005_ID_7015 0xf0
/* Per-device driver state, stored in the iio_dev private area. */
struct si7005_data {
	struct i2c_client *client;	/* underlying I2C slave (addr 0x40) */
	struct mutex lock;		/* serializes start-measure/poll/read sequences */
	u8 config;			/* cached SI7005_CONFIG value read at probe */
};
/*
 * Trigger one conversion (temperature if @temp, else humidity), poll the
 * status register until the not-ready bit clears, then return the 16-bit
 * big-endian result from the data register (negative errno on failure).
 */
static int si7005_read_measurement(struct si7005_data *data, bool temp)
{
	int attempt;
	int ret;

	mutex_lock(&data->lock);

	/* start a conversion; keep the cached config bits intact */
	ret = i2c_smbus_write_byte_data(data->client, SI7005_CONFIG,
		data->config | SI7005_CONFIG_START |
		(temp ? SI7005_CONFIG_TEMP : 0));
	if (ret < 0)
		goto done;

	/* poll up to 50 * 20ms for the conversion to complete */
	for (attempt = 0; attempt < 50; attempt++) {
		msleep(20);
		ret = i2c_smbus_read_byte_data(data->client, SI7005_STATUS);
		if (ret < 0)
			goto done;
		if (!(ret & SI7005_STATUS_NRDY))
			break;
	}
	if (attempt == 50) {
		ret = -EIO;
		goto done;
	}

	ret = i2c_smbus_read_word_swapped(data->client, SI7005_DATA);

done:
	mutex_unlock(&data->lock);

	return ret;
}
static int si7005_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
struct si7005_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = si7005_read_measurement(data, chan->type == IIO_TEMP);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_TEMP) {
*val = 7;
*val2 = 812500;
} else {
*val = 3;
*val2 = 906250;
}
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_OFFSET:
if (chan->type == IIO_TEMP)
*val = -50 * 32 * 4;
else
*val = -24 * 16 * 16;
return IIO_VAL_INT;
default:
break;
}
return -EINVAL;
}
/* Two channels: relative humidity and temperature, each exposing raw,
 * scale and offset attributes. */
static const struct iio_chan_spec si7005_channels[] = {
	{
		.type = IIO_HUMIDITYRELATIVE,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
			BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
	},
	{
		.type = IIO_TEMP,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
			BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
	}
};
/* IIO callbacks: only sysfs-style reads are supported (no buffering). */
static const struct iio_info si7005_info = {
	.read_raw = si7005_read_raw,
	.driver_module = THIS_MODULE,
};
/*
 * Probe: verify SMBus word transfers are available, allocate the IIO
 * device, validate the chip ID (Si7005 or Si7015), cache the config
 * register and register the device. All resources are devm-managed.
 */
static int si7005_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct iio_dev *indio_dev;
	struct si7005_data *data;
	int ret;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
		return -ENODEV;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
	if (!indio_dev)
		return -ENOMEM;

	/* wire up the private data and back-pointer */
	data = iio_priv(indio_dev);
	data->client = client;
	mutex_init(&data->lock);
	i2c_set_clientdata(client, indio_dev);

	/* describe the device to the IIO core */
	indio_dev->dev.parent = dev;
	indio_dev->name = dev_name(dev);
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &si7005_info;
	indio_dev->channels = si7005_channels;
	indio_dev->num_channels = ARRAY_SIZE(si7005_channels);

	/* reject chips that don't identify as Si7005/Si7015 */
	ret = i2c_smbus_read_byte_data(client, SI7005_ID);
	if (ret < 0)
		return ret;
	if (ret != SI7005_ID_7005 && ret != SI7005_ID_7015)
		return -ENODEV;

	/* remember the power-on configuration for later measurements */
	ret = i2c_smbus_read_byte_data(client, SI7005_CONFIG);
	if (ret < 0)
		return ret;
	data->config = ret;

	return devm_iio_device_register(dev, indio_dev);
}
/* I2C device-id table used for driver matching and module autoloading. */
static const struct i2c_device_id si7005_id[] = {
	{ "si7005", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, si7005_id);

static struct i2c_driver si7005_driver = {
	.driver = {
		.name = "si7005",
	},
	.probe = si7005_probe,
	.id_table = si7005_id,
};

module_i2c_driver(si7005_driver);

MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
MODULE_DESCRIPTION("Silabs Si7005 humidity and temperature sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jdkernel/mecha_sense_2.6.35 | mm/memory_hotplug.c | 411 | 22600 | /*
* linux/mm/memory_hotplug.c
*
* Copyright (C)
*/
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <asm/tlbflush.h>
#include "internal.h"
/*
 * Claim [start, start + size) as "System RAM" in the iomem resource tree.
 * Returns the new resource, or NULL if the range collides with an
 * existing one. Allocation failure is treated as fatal (BUG_ON).
 */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *new = kzalloc(sizeof(*new), GFP_KERNEL);

	BUG_ON(!new);

	new->name = "System RAM";
	new->start = start;
	new->end = start + size - 1;
	new->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	if (request_resource(&iomem_resource, new) >= 0)
		return new;

	printk("System RAM resource %llx - %llx cannot be added\n",
		(unsigned long long)new->start, (unsigned long long)new->end);
	kfree(new);
	return NULL;
}
/* Undo register_memory_resource(); tolerates a NULL resource. */
static void release_memory_resource(struct resource *res)
{
	if (res) {
		release_resource(res);
		kfree(res);
	}
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Tag a bootmem-allocated page as memory-hotplug metadata: @type goes in
 * _mapcount, @info (section or node number) in page->private, and a
 * reference is taken so put_page_bootmem() can free it later.
 */
static void get_page_bootmem(unsigned long info, struct page *page, int type)
{
	atomic_set(&page->_mapcount, type);
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}
/* reference to __meminit __free_pages_bootmem is valid
 * so use __ref to tell modpost not to generate a warning */
void __ref put_page_bootmem(struct page *page)
{
	int type;

	type = atomic_read(&page->_mapcount);
	BUG_ON(type >= -1);

	/* dropping the last reference: clear the metadata and free the page */
	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		reset_page_mapcount(page);
		__free_pages_bootmem(page, 0);
	}
}
/*
 * Record that the memmap and usemap pages backing one memory section were
 * bootmem-allocated, so they can be accounted for on hot-remove.
 */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	/* the pageblock-flags bitmap gets the same treatment */
	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
/*
 * Tag every bootmem page that backs a node's metadata (pglist_data, the
 * zone wait tables, and each section's memmap/usemap) so that memory
 * hot-remove can account for them.
 */
void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	/* pages holding the pglist_data itself */
	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	/* pages holding each populated zone's wait table */
	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register_section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		register_page_bootmem_info_section(pfn);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
/*
 * Extend a zone's [zone_start_pfn, zone_start_pfn + spanned_pages) span
 * to cover [start_pfn, end_pfn), under the zone span seqlock.
 */
static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}
/*
 * Extend a node's spanned-pages range to cover [start_pfn, end_pfn).
 * NOTE(review): unlike grow_zone_span() this takes no lock itself; the
 * caller (__add_zone) holds the pgdat resize lock — keep it that way.
 */
static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}
/*
 * Grow @zone (and its node) to cover one new section starting at
 * @phys_start_pfn, initializing the zone first if it was empty, and
 * initialize the new section's struct pages. Returns 0 or -errno.
 */
static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;
	/* a zone without a wait table has never been initialized */
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}
/*
 * Hot-add a single memory section: allocate its sparse memmap, grow the
 * zone/node spans, and register it with the memory sysfs layer.
 * Returns -EEXIST if the section is already present.
 */
static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
/*
* XXX: Freeing memmap with vmemmap is not implement yet.
* This should be removed later.
*/
return -EBUSY;
}
#else
/*
 * Remove one memory section: unregister it from sysfs, then tear down
 * its sparse memmap under the pgdat resize lock.
 */
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long flags;
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	pgdat_resize_lock(pgdat, &flags);
	sparse_remove_one_section(zone, ms);
	pgdat_resize_unlock(pgdat, &flags);
	return 0;
}
#endif
/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 *
 * Adds section by section over [phys_start_pfn, phys_start_pfn+nr_pages);
 * -EEXIST from an individual section is tolerated (resource-collision
 * checking happens at the add_memory() level).
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
			unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* during initialize mem_map, align hot-added range to section */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * EEXIST is finally dealt with by ioresource collision
		 * check. see add_memory() => register_memory_resource()
		 * Warning will be printed if there is collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjust properly by
 * calling offline_pages().
 *
 * Return: 0 on success, or the first per-section error encountered.
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		/* give the range back to the iomem resource tree first */
		release_mem_region(pfn << PAGE_SHIFT,
				 PAGES_PER_SECTION << PAGE_SHIFT);
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
/*
 * Hand one hot-added page over to the buddy allocator, updating the
 * global memory accounting (totalram, highmem, max pfn bookkeeping).
 */
void online_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	totalram_pages++;
	if (pfn >= num_physpages)
		num_physpages = pfn + 1;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif

#ifdef CONFIG_FLATMEM
	max_mapnr = max(page_to_pfn(page), max_mapnr);
#endif

	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}
/*
 * walk_system_ram_range() callback: online every page in the range and
 * accumulate the count in *arg. The range is only processed when its
 * first page is still marked reserved (i.e. not onlined before).
 */
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			online_page(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}
/*
 * Bring [pfn, pfn + nr_pages) online: notify listeners, hand the pages
 * to the buddy allocator, fix up zone/node accounting and zonelists,
 * and start kswapd for a newly populated node. Returns 0 or -errno.
 */
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	/* a node with no present pages yet will change status by this online */
	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
			nr_pages, pfn);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}

	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	if (need_zonelists_rebuild)
		build_all_zonelists(zone);
	else
		zone_pcp_update(zone);
	mutex_unlock(&zonelists_mutex);
	setup_per_zone_wmarks();
	calculate_zone_inactive_ratio(zone);
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();
	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
/*
 * Allocate and minimally initialize a pglist_data for a node being
 * brought online; all zones start empty. Returns NULL on allocation
 * failure.
 */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	return pgdat;
}
/* Undo hotadd_new_pgdat(): detach the node data, then free it. */
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
}
/*
 * called by cpu_up() to online a node without onlined memory.
 *
 * Allocates the node's pgdat, marks the node online and registers its
 * sysfs entry. Returns 0 on success or -ENOMEM if the pgdat allocation
 * failed.
 */
int mem_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	lock_system_sleep();
	pgdat = hotadd_new_pgdat(nid, 0);
	/*
	 * BUG FIX: the NULL check was inverted ("if (pgdat)"), which made
	 * every successful allocation return -ENOMEM and let a failed one
	 * fall through to register_one_node() with no pgdat.
	 */
	if (!pgdat) {
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

out:
	unlock_system_sleep();
	return ret;
}
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
/*
 * Hot-add [start, start + size) of physical memory to node @nid:
 * reserve the iomem resource, create the node's pgdat if needed, let the
 * arch map the memory, then register the node and firmware map entry.
 * Returns 0, -EEXIST on resource collision, or another -errno.
 */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	lock_system_sleep();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto out;
		new_pgdat = 1;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If sysfs file of new node can't create, cpu on the node
		 * can't be hot-added. There is no rollback way now.
		 * So, check by BUG_ON() to catch it reluctantly..
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

out:
	unlock_system_sleep();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy contraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock
 */
static inline int pageblock_free(struct page *page)
{
	if (!PageBuddy(page))
		return 0;
	return page_order(page) >= pageblock_order;
}
/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	/* otherwise step forward one pageblock */
	return page + pageblock_nr_pages;
}
/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	int type;
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		type = get_pageblock_migratetype(page);

		/*
		 * A pageblock containing MOVABLE or free pages is considered
		 * removable
		 */
		if (type != MIGRATE_MOVABLE && !pageblock_free(page))
			return 0;

		/*
		 * A pageblock starting with a PageReserved page is not
		 * considered removable.
		 */
		if (PageReserved(page))
			return 0;
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}
/*
 * Confirm all pages in a range [start, end) is belongs to the same zone.
 * Returns 1 when every valid page in the range is in one zone, 0 otherwise.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	/* sample one valid pfn per MAX_ORDER-sized chunk */
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}
/*
 * Scanning pfn is much easier than scanning lru list.
 * Scan pfn from start to end and Find LRU page.
 * Returns the first pfn whose page is on the LRU, or 0 if none exists.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn = start;

	while (pfn < end) {
		if (pfn_valid(pfn) && PageLRU(pfn_to_page(pfn)))
			return pfn;
		pfn++;
	}
	return 0;
}
/* migrate_pages() target-page allocator for hot-remove migration. */
static struct page *
hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
{
	/* This should be improooooved!! */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}
#define NR_OFFLINE_AT_ONCE_PAGES (256)
/*
 * Isolate up to NR_OFFLINE_AT_ONCE_PAGES LRU pages in [start_pfn, end_pfn)
 * and migrate them out of the range. Returns 0 on success, the number of
 * failed pages from migrate_pages(), or -EBUSY if non-LRU in-use pages
 * remain in the range.
 */
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		} else {
			/* Because we don't have big zone->lock. we should
			   check this again here. */
			if (page_count(page))
				not_managed++;
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
		}
	}
	/* unmanaged in-use pages make the whole range non-offlinable */
	ret = -EBUSY;
	if (not_managed) {
		if (!list_empty(&source))
			putback_lru_pages(&source);
		goto out;
	}
	ret = 0;
	if (list_empty(&source))
		goto out;
	/* this function returns # of failed pages */
	ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
out:
	return ret;
}
/*
 * remove from free_area[] and mark all as Reserved.
 * (walk_system_ram_range callback; @data is unused.)
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}
/* Pull every System-RAM page in the range off the buddy free lists. */
static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}
/*
 * Check all pages in range, recoreded as memory resource, are isolated.
 * On success adds the range's page count to *(long *)data.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	/*
	 * NOTE(review): 'offlined' is initialized from *data but immediately
	 * overwritten below; the initializer is dead but harmless.
	 */
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}
/*
 * Verify the whole range is isolated; returns the number of isolated
 * pages, or a negative value if any sub-range failed the check.
 */
static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}
/*
 * Take [start_pfn, end_pfn) offline: isolate the range, migrate any LRU
 * pages out of it (retrying with drains until @timeout jiffies elapse),
 * then pull the now-free pages off the buddy lists and fix up zone/node
 * accounting. On any failure the isolation is rolled back.
 */
static int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/* This makes hotplug much easier...and readable.
	   we assume this for now. .*/
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_system_sleep();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	/* offlining every present page empties the node */
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have page on LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			/* hard migration failure counts against retry_max */
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zone's lru pagevec, this is asynchronous... */
	lru_add_drain_all();
	flush_scheduled_work();
	yield();
	/* drain pcp pages , this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/* Ok, all of our target is isolated.
	   We cannot do rollback at this point. */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and makes migrate type to be MOVABLE */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	setup_per_zone_wmarks();
	calculate_zone_inactive_ratio(zone);
	if (!node_present_pages(node)) {
		node_clear_state(node, N_HIGH_MEMORY);
		kswapd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_system_sleep();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
		start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn);

out:
	unlock_system_sleep();
	return ret;
}
/* Offline [start, start + size) with a fixed 120-second timeout. */
int remove_memory(u64 start, u64 size)
{
	unsigned long first_pfn = PFN_DOWN(start);

	return offline_pages(first_pfn, first_pfn + PFN_DOWN(size), 120 * HZ);
}
#else
/* Stub when CONFIG_MEMORY_HOTREMOVE is disabled. */
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);
| gpl-2.0 |
linux-shield/kernel | arch/arm/mach-pxa/pxa27x.c | 411 | 12560 | /*
* linux/arch/arm/mach-pxa/pxa27x.c
*
* Author: Nicolas Pitre
* Created: Nov 05, 2002
* Copyright: MontaVista Software Inc.
*
* Code specific to PXA27x aka Bulverde.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/i2c/pxa-i2c.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/suspend.h>
#include <mach/irqs.h>
#include <mach/pxa27x.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <mach/pm.h>
#include <mach/dma.h>
#include <mach/smemc.h>
#include "generic.h"
#include "devices.h"
#include "clock.h"
/* Clear the OTG-pad-hold latch in PSSR (write-one-to-clear) on PXA27x. */
void pxa27x_clear_otgph(void)
{
	if (cpu_is_pxa27x() && (PSSR & PSSR_OTGPH))
		PSSR |= PSSR_OTGPH;
}
EXPORT_SYMBOL(pxa27x_clear_otgph);
/*
 * MFP configurations used to flip the AC97 nRESET pin between a plain
 * GPIO driven high and its AC97 controller alternate function.
 * Entries come in [gpio-high, alt-fn] pairs: [0]/[1] for GPIO113,
 * [2]/[3] for GPIO95 (see pxa27x_configure_ac97reset()).
 */
static unsigned long ac97_reset_config[] = {
	GPIO113_AC97_nRESET_GPIO_HIGH,
	GPIO113_AC97_nRESET,
	GPIO95_AC97_nRESET_GPIO_HIGH,
	GPIO95_AC97_nRESET,
};
void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio)
{
	/*
	 * Works around a bug in the pxa27x's ac97 controller during a
	 * warm reset.  The reset_gpio is reconfigured as follows:
	 *   to_gpio == true:  generic output gpio, driven high
	 *   to_gpio == false: ac97 controller alt fn AC97_nRESET
	 * Only GPIO113 and GPIO95 can carry AC97_nRESET; anything else
	 * is silently ignored.
	 */
	unsigned long *cfg = NULL;

	if (reset_gpio == 113)
		cfg = to_gpio ? &ac97_reset_config[0] : &ac97_reset_config[1];
	else if (reset_gpio == 95)
		cfg = to_gpio ? &ac97_reset_config[2] : &ac97_reset_config[3];

	if (cfg)
		pxa2xx_mfp_config(cfg, 1);
}
EXPORT_SYMBOL_GPL(pxa27x_configure_ac97reset);
/* Crystal clock: 13MHz */
#define BASE_CLK 13000000
/*
* Get the clock frequency as reflected by CCSR and the turbo flag.
* We assume these values have been applied via a fcs.
* If info is not 0 we also display the current settings.
*/
/*
 * Return the current CPU clock frequency in kHz, as reflected by CCSR
 * and the coprocessor-14 CLKCFG register.  If @info is non-zero, also
 * print the run/turbo/memory/system-bus frequencies.
 */
unsigned int pxa27x_get_clk_frequency_khz(int info)
{
	unsigned long ccsr, clkcfg;
	unsigned int l, L, m, M, n2, N, S;
	int cccr_a, t, ht, b;

	ccsr = CCSR;
	cccr_a = CCCR & (1 << 25);	/* CCCR[A]: memory clock source select */

	/* Read clkcfg register: it has turbo, b, half-turbo (and f) */
	asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg) );
	t = clkcfg & (1 << 0);		/* turbo mode active */
	ht = clkcfg & (1 << 2);		/* half-turbo (read but unused here) */
	b = clkcfg & (1 << 3);		/* fast-bus mode */

	l = ccsr & 0x1f;		/* run-mode multiplier */
	n2 = (ccsr>>7) & 0xf;		/* 2 * turbo multiplier */
	m = (l <= 10) ? 1 : (l <= 20) ? 2 : 4;	/* memory divider from l */

	L = l * BASE_CLK;		/* run-mode frequency */
	N = (L * n2) / 2;		/* turbo-mode frequency */
	M = (!cccr_a) ? (L/m) : ((b) ? L : (L/2));	/* memory clock */
	S = (b) ? L : (L/2);		/* system bus clock */

	if (info) {
		printk( KERN_INFO "Run Mode clock: %d.%02dMHz (*%d)\n",
			L / 1000000, (L % 1000000) / 10000, l );
		printk( KERN_INFO "Turbo Mode clock: %d.%02dMHz (*%d.%d, %sactive)\n",
			N / 1000000, (N % 1000000)/10000, n2 / 2, (n2 % 2)*5,
			(t) ? "" : "in" );
		printk( KERN_INFO "Memory clock: %d.%02dMHz (/%d)\n",
			M / 1000000, (M % 1000000) / 10000, m );
		printk( KERN_INFO "System bus clock: %d.%02dMHz \n",
			S / 1000000, (S % 1000000) / 10000 );
	}
	/* report turbo frequency when turbo is active, run frequency otherwise */
	return (t) ? (N/1000) : (L/1000);
}
/*
* Return the current mem clock frequency as reflected by CCCR[A], B, and L
*/
/*
 * Return the current mem clock frequency (Hz) as reflected by
 * CCCR[A], the fast-bus (B) bit in CLKCFG, and the run multiplier L.
 */
static unsigned long clk_pxa27x_mem_getrate(struct clk *clk)
{
	unsigned long ccsr, clkcfg;
	unsigned int l, L, m, M;
	int cccr_a, b;

	ccsr = CCSR;
	cccr_a = CCCR & (1 << 25);	/* memory clock source select */

	/* Read clkcfg register: it has turbo, b, half-turbo (and f) */
	asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg) );
	b = clkcfg & (1 << 3);		/* fast-bus mode */

	l = ccsr & 0x1f;		/* run-mode multiplier */
	m = (l <= 10) ? 1 : (l <= 20) ? 2 : 4;

	L = l * BASE_CLK;
	M = (!cccr_a) ? (L/m) : ((b) ? L : (L/2));

	return M;
}
/* Memory clock: always on (dummy enable/disable), rate read from hardware. */
static const struct clkops clk_pxa27x_mem_ops = {
	.enable = clk_dummy_enable,
	.disable = clk_dummy_disable,
	.getrate = clk_pxa27x_mem_getrate,
};
/*
 * Return the current LCD clock frequency in units of 10kHz
 */
static unsigned int pxa27x_get_lcdclk_frequency_10khz(void)
{
	/* run-mode multiplier from CCSR and its LCD divider */
	unsigned int l = CCSR & 0x1f;
	unsigned int k = (l <= 7) ? 1 : (l <= 16) ? 2 : 4;

	/* K = (l * BASE_CLK) / k, reported in 10 kHz units */
	return (l * BASE_CLK / k) / 10000;
}
/* clkops wrapper: LCD clock rate in Hz. */
static unsigned long clk_pxa27x_lcd_getrate(struct clk *clk)
{
	return pxa27x_get_lcdclk_frequency_10khz() * 10000;
}
/* LCD (and camera) clock: gated via CKEN, rate derived from CCSR. */
static const struct clkops clk_pxa27x_lcd_ops = {
	.enable = clk_pxa2xx_cken_enable,
	.disable = clk_pxa2xx_cken_disable,
	.getrate = clk_pxa27x_lcd_getrate,
};
/*
 * Peripheral clocks.  Each DEFINE_PXA2_CKEN entry names the CKEN gate
 * bit, a fixed nominal rate in Hz, and an enable delay; LCD/camera and
 * the memory clock use dedicated clkops that read the rate from the
 * clock registers at runtime.
 */
static DEFINE_PXA2_CKEN(pxa27x_ffuart, FFUART, 14857000, 1);
static DEFINE_PXA2_CKEN(pxa27x_btuart, BTUART, 14857000, 1);
static DEFINE_PXA2_CKEN(pxa27x_stuart, STUART, 14857000, 1);
static DEFINE_PXA2_CKEN(pxa27x_i2s, I2S, 14682000, 0);
static DEFINE_PXA2_CKEN(pxa27x_i2c, I2C, 32842000, 0);
static DEFINE_PXA2_CKEN(pxa27x_usb, USB, 48000000, 5);
static DEFINE_PXA2_CKEN(pxa27x_mmc, MMC, 19500000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ficp, FICP, 48000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_usbhost, USBHOST, 48000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_pwri2c, PWRI2C, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_keypad, KEYPAD, 32768, 0);
static DEFINE_PXA2_CKEN(pxa27x_ssp1, SSP1, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ssp2, SSP2, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ssp3, SSP3, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_pwm0, PWM0, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_pwm1, PWM1, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ac97, AC97, 24576000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ac97conf, AC97CONF, 24576000, 0);
static DEFINE_PXA2_CKEN(pxa27x_msl, MSL, 48000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_usim, USIM, 48000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_memstk, MEMSTK, 19500000, 0);
static DEFINE_PXA2_CKEN(pxa27x_im, IM, 0, 0);
static DEFINE_PXA2_CKEN(pxa27x_memc, MEMC, 0, 0);

static DEFINE_CK(pxa27x_lcd, LCD, &clk_pxa27x_lcd_ops);
static DEFINE_CK(pxa27x_camera, CAMERA, &clk_pxa27x_lcd_ops);
static DEFINE_CLK(pxa27x_mem, &clk_pxa27x_mem_ops, 0, 0);
/*
 * clkdev lookup table: binds each clock to a device name and/or
 * connection id so drivers can clk_get() it.
 */
static struct clk_lookup pxa27x_clkregs[] = {
	INIT_CLKREG(&clk_pxa27x_lcd, "pxa2xx-fb", NULL),
	INIT_CLKREG(&clk_pxa27x_camera, "pxa27x-camera.0", NULL),
	INIT_CLKREG(&clk_pxa27x_ffuart, "pxa2xx-uart.0", NULL),
	INIT_CLKREG(&clk_pxa27x_btuart, "pxa2xx-uart.1", NULL),
	INIT_CLKREG(&clk_pxa27x_stuart, "pxa2xx-uart.2", NULL),
	INIT_CLKREG(&clk_pxa27x_i2s, "pxa2xx-i2s", NULL),
	INIT_CLKREG(&clk_pxa27x_i2c, "pxa2xx-i2c.0", NULL),
	INIT_CLKREG(&clk_pxa27x_usb, "pxa27x-udc", NULL),
	INIT_CLKREG(&clk_pxa27x_mmc, "pxa2xx-mci.0", NULL),
	/* the IrDA port shares the STUART clock */
	INIT_CLKREG(&clk_pxa27x_stuart, "pxa2xx-ir", "UARTCLK"),
	INIT_CLKREG(&clk_pxa27x_ficp, "pxa2xx-ir", "FICPCLK"),
	INIT_CLKREG(&clk_pxa27x_usbhost, "pxa27x-ohci", NULL),
	INIT_CLKREG(&clk_pxa27x_pwri2c, "pxa2xx-i2c.1", NULL),
	INIT_CLKREG(&clk_pxa27x_keypad, "pxa27x-keypad", NULL),
	INIT_CLKREG(&clk_pxa27x_ssp1, "pxa27x-ssp.0", NULL),
	INIT_CLKREG(&clk_pxa27x_ssp2, "pxa27x-ssp.1", NULL),
	INIT_CLKREG(&clk_pxa27x_ssp3, "pxa27x-ssp.2", NULL),
	INIT_CLKREG(&clk_pxa27x_pwm0, "pxa27x-pwm.0", NULL),
	INIT_CLKREG(&clk_pxa27x_pwm1, "pxa27x-pwm.1", NULL),
	INIT_CLKREG(&clk_pxa27x_ac97, NULL, "AC97CLK"),
	INIT_CLKREG(&clk_pxa27x_ac97conf, NULL, "AC97CONFCLK"),
	INIT_CLKREG(&clk_pxa27x_msl, NULL, "MSLCLK"),
	INIT_CLKREG(&clk_pxa27x_usim, NULL, "USIMCLK"),
	INIT_CLKREG(&clk_pxa27x_memstk, NULL, "MSTKCLK"),
	INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"),
	INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"),
	INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL),
	INIT_CLKREG(&clk_dummy, "pxa27x-gpio", NULL),
	INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
};
#ifdef CONFIG_PM
#define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x]
/*
* allow platforms to override default PWRMODE setting used for PM_SUSPEND_MEM
*/
static unsigned int pwrmode = PWRMODE_SLEEP;

/*
 * pxa27x_set_pwrmode - select the power mode used for PM_SUSPEND_MEM
 * @mode: PWRMODE_SLEEP or PWRMODE_DEEPSLEEP
 *
 * Returns 0 when the mode is accepted, -EINVAL for anything else.
 */
int __init pxa27x_set_pwrmode(unsigned int mode)
{
	if (mode != PWRMODE_SLEEP && mode != PWRMODE_DEEPSLEEP)
		return -EINVAL;

	pwrmode = mode;
	return 0;
}
/*
* List of global PXA peripheral registers to preserve.
* More ones like CP and general purpose register values are preserved
* with the stack pointer in sleep.S.
*/
/* Indices into the sleep_save[] array; SLEEP_SAVE_COUNT sizes it. */
enum {
	SLEEP_SAVE_PSTR,
	SLEEP_SAVE_MDREFR,
	SLEEP_SAVE_PCFR,
	SLEEP_SAVE_COUNT
};
/* Save the registers listed above before entering a low-power state. */
void pxa27x_cpu_pm_save(unsigned long *sleep_save)
{
	sleep_save[SLEEP_SAVE_MDREFR] = __raw_readl(MDREFR);
	SAVE(PCFR);

	SAVE(PSTR);
}
/*
 * Restore the saved registers after resume.  The PSSR write in the
 * middle clears the sleep status bits (RDH, PH) before PSTR is put
 * back.
 */
void pxa27x_cpu_pm_restore(unsigned long *sleep_save)
{
	__raw_writel(sleep_save[SLEEP_SAVE_MDREFR], MDREFR);
	RESTORE(PCFR);

	PSSR = PSSR_RDH | PSSR_PH;

	RESTORE(PSTR);
}
/*
 * Enter a PXA27x low-power state.  PM_SUSPEND_STANDBY uses the on-chip
 * standby routine; PM_SUSPEND_MEM suspends through cpu_suspend() with
 * the platform-selected pwrmode.
 */
void pxa27x_cpu_pm_enter(suspend_state_t state)
{
	extern void pxa_cpu_standby(void);
#ifndef CONFIG_IWMMXT
	/*
	 * Without the iWMMXt context switcher, the wireless-MMX acc0
	 * accumulator must be saved ("mra") and restored ("mar") by
	 * hand around the suspend.
	 */
	u64 acc0;
	asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	/* ensure voltage-change sequencer not initiated, which hangs */
	PCFR &= ~PCFR_FVC;

	/* Clear edge-detect status register. */
	PEDR = 0xDF12FE1B;

	/* Clear reset status */
	RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR;

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa_cpu_standby();
		break;
	case PM_SUSPEND_MEM:
		cpu_suspend(pwrmode, pxa27x_finish_suspend);
#ifndef CONFIG_IWMMXT
		/*
		 * NOTE(review): "mar" reads the register pair into acc0,
		 * so acc0 looks like it should be an input operand
		 * ("r", not "=r") here -- confirm against upstream.
		 */
		asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0));
#endif
		break;
	}
}
/* Only suspend-to-RAM and standby are supported on PXA27x. */
static int pxa27x_cpu_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		return 1;
	default:
		return 0;
	}
}
/* Point the power-manager scratch register at the resume entry point. */
static int pxa27x_cpu_pm_prepare(void)
{
	/* set resume return address */
	PSPR = virt_to_phys(cpu_resume);
	return 0;
}
/* Clear the resume address once we are back up. */
static void pxa27x_cpu_pm_finish(void)
{
	/* ensure not to come back here if it wasn't intended */
	PSPR = 0;
}
/* PM callbacks registered with the generic PXA suspend machinery. */
static struct pxa_cpu_pm_fns pxa27x_cpu_pm_fns = {
	.save_count = SLEEP_SAVE_COUNT,
	.save = pxa27x_cpu_pm_save,
	.restore = pxa27x_cpu_pm_restore,
	.valid = pxa27x_cpu_pm_valid,
	.enter = pxa27x_cpu_pm_enter,
	.prepare = pxa27x_cpu_pm_prepare,
	.finish = pxa27x_cpu_pm_finish,
};
/* Hook the PXA27x PM callbacks into the platform PM layer. */
static void __init pxa27x_init_pm(void)
{
	pxa_cpu_pm_fns = &pxa27x_cpu_pm_fns;
}
#else
static inline void pxa27x_init_pm(void) {}
#endif
/* PXA27x: Various gpios can issue wakeup events. This logic only
* handles the simple cases, not the WEMUX2 and WEMUX3 options
*/
/*
 * Configure an interrupt source as a wakeup event.  Handles GPIOs,
 * the keypad, the RTC alarm and USB; WEMUX2/WEMUX3 options are not
 * supported.
 */
static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	uint32_t mask;

	/* GPIO wakeups are routed through the GPIO layer */
	if (gpio >= 0 && gpio < 128)
		return gpio_set_wake(gpio, on);

	switch (d->irq) {
	case IRQ_KEYPAD:
		return keypad_set_wake(on);
	case IRQ_RTCAlrm:
		mask = PWER_RTC;
		break;
	case IRQ_USB:
		mask = 1u << 26;
		break;
	default:
		return -EINVAL;
	}

	if (on)
		PWER |= mask;
	else
		PWER &= ~mask;

	return 0;
}
/* Initialise the 34 on-chip interrupt sources with our wake handler. */
void __init pxa27x_init_irq(void)
{
	pxa_init_irq(34, pxa27x_set_wake);
}
/* Static I/O mappings installed early by pxa27x_map_io(). */
static struct map_desc pxa27x_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA2XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE
	}, {	/* UNCACHED_PHYS_0 */
		.virtual	= UNCACHED_PHYS_0,
		.pfn		= __phys_to_pfn(0x00000000),
		.length		= UNCACHED_PHYS_0_SIZE,
		.type		= MT_DEVICE
	},
};
/*
 * Set up the common PXA and PXA27x-specific static mappings, then dump
 * the clock configuration to the log.
 */
void __init pxa27x_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa27x_io_desc));
	pxa27x_get_clk_frequency_khz(1);
}
/*
* device registration specific to PXA27x.
*/
/*
 * Enable the power-manager I2C unit (PCFR write is done with IRQs off)
 * and register the power-I2C platform device with the given data.
 */
void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	local_irq_disable();
	PCFR |= PCFR_PI2CEN;
	local_irq_enable();
	pxa_register_device(&pxa27x_device_i2c_power, info);
}
/* Platform data for the PXA27x GPIO controller device. */
static struct pxa_gpio_platform_data pxa27x_gpio_info __initdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
	.gpio_set_wake	= gpio_set_wake,
};
/* Platform devices unconditionally registered by pxa27x_init(). */
static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_platform,
	&sa1100_device_rtc,
	&pxa_device_rtc,
	&pxa27x_device_ssp1,
	&pxa27x_device_ssp2,
	&pxa27x_device_ssp3,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};
/*
 * Register PXA27x clocks, DMA, PM hooks, syscore ops and core devices.
 * Runs at postcore time and does nothing on non-PXA27x silicon.
 */
static int __init pxa27x_init(void)
{
	int ret;

	if (!cpu_is_pxa27x())
		return 0;

	/* latch the cause of the last reset */
	reset_status = RCSR;

	clkdev_add_table(pxa27x_clkregs, ARRAY_SIZE(pxa27x_clkregs));

	ret = pxa_init_dma(IRQ_DMA, 32);
	if (ret)
		return ret;

	pxa27x_init_pm();

	register_syscore_ops(&pxa_irq_syscore_ops);
	register_syscore_ops(&pxa2xx_mfp_syscore_ops);
	register_syscore_ops(&pxa2xx_clock_syscore_ops);

	pxa_register_device(&pxa27x_device_gpio, &pxa27x_gpio_info);
	return platform_add_devices(devices, ARRAY_SIZE(devices));
}

postcore_initcall(pxa27x_init);
| gpl-2.0 |
MassStash/htc_m8wl_kernel_sense_4.4.4 | drivers/misc/mpu3050/mlsl-kernel.c | 1179 | 9648 | /*
$License:
Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
$
*/
#include "mlsl.h"
#include "mpu-i2c.h"
/* ------------ */
/* - Defines. - */
/* ------------ */
/* ---------------------- */
/* - Types definitions. - */
/* ---------------------- */
/* --------------------- */
/* - Function p-types. - */
/* --------------------- */
/**
* @brief used to open the I2C or SPI serial port.
* This port is used to send and receive data to the MPU device.
* @param portNum
* The COM port number associated with the device in use.
* @return ML_SUCCESS if successful, a non-zero error code otherwise.
*/
/* No per-port setup is needed in the kernel build; always succeeds
 * (both parameters are unused). */
tMLError MLSLSerialOpen(char const *port, void **sl_handle)
{
	return ML_SUCCESS;
}
/**
* @brief used to reset any buffering the driver may be doing
* @return ML_SUCCESS if successful, a non-zero error code otherwise.
*/
/* No driver-side buffering exists here, so reset is a no-op. */
tMLError MLSLSerialReset(void *sl_handle)
{
	return ML_SUCCESS;
}
/**
* @brief used to close the I2C or SPI serial port.
* This port is used to send and receive data to the MPU device.
* @return ML_SUCCESS if successful, a non-zero error code otherwise.
*/
/* Nothing to tear down; always succeeds. */
tMLError MLSLSerialClose(void *sl_handle)
{
	return ML_SUCCESS;
}
/**
 *	@brief	used to write a single byte of data.
 *		This should be sent by I2C or SPI.
 *
 *	@param	slaveAddr	I2C slave address of device.
 *	@param	registerAddr	Register address to write.
 *	@param	data		Single byte of data to write.
 *
 *	@return ML_SUCCESS if the command is successful, an error code otherwise.
 */
/* Thin wrapper: write one register byte via the i2c adapter handle. */
tMLError MLSLSerialWriteSingle(void *sl_handle,
			       unsigned char slaveAddr,
			       unsigned char registerAddr,
			       unsigned char data)
{
	return sensor_i2c_write_register((struct i2c_adapter *) sl_handle,
					 slaveAddr, registerAddr, data);
}
/**
 *	@brief	used to write multiple bytes of data to registers.
 *		This should be sent by I2C.
 *
 *	@param	slaveAddr	I2C slave address of device.
 *	@param	length		Length of the block: data[0] holds the start
 *				register address, followed by length - 1
 *				payload bytes.
 *	@param	data		Pointer to block of data.
 *
 *	@return ML_SUCCESS if successful, a non-zero error code otherwise.
 */
tMLError MLSLSerialWrite(void *sl_handle,
			 unsigned char slaveAddr,
			 unsigned short length, unsigned char const *data)
{
	tMLError result;
	/* data[0] is the start register; the remaining bytes are payload */
	const unsigned short dataLength = length - 1;
	const unsigned char startRegAddr = data[0];
	/* scratch buffer: 1 register-address byte + one payload chunk */
	unsigned char i2cWrite[SERIAL_MAX_TRANSFER_SIZE + 1];
	unsigned short bytesWritten = 0;

	/* split the payload into chunks the I2C layer can handle */
	while (bytesWritten < dataLength) {
		unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE,
					     dataLength - bytesWritten);
		if (bytesWritten == 0) {
			/* first chunk: caller's buffer already starts with
			 * the register address, send it as-is */
			result = sensor_i2c_write((struct i2c_adapter *)
						  sl_handle, slaveAddr,
						  1 + thisLen, data);
		} else {
			/* manually increment register addr between chunks */
			i2cWrite[0] = startRegAddr + bytesWritten;
			memcpy(&i2cWrite[1], &data[1 + bytesWritten],
			       thisLen);
			result = sensor_i2c_write((struct i2c_adapter *)
						  sl_handle, slaveAddr,
						  1 + thisLen, i2cWrite);
		}
		if (ML_SUCCESS != result)
			return result;
		bytesWritten += thisLen;
	}
	return ML_SUCCESS;
}
/**
* @brief used to read multiple bytes of data from registers.
* This should be sent by I2C.
*
* @param slaveAddr I2C slave address of device.
* @param registerAddr Register address to read.
* @param length Length of burst of data.
* @param data Pointer to block of data.
*
* @return Zero if successful; an error code otherwise
*/
tMLError MLSLSerialRead(void *sl_handle,
			unsigned char slaveAddr,
			unsigned char registerAddr,
			unsigned short length, unsigned char *data)
{
	unsigned short done = 0;

	/* FIFO and memory windows have dedicated accessors */
	if (registerAddr == MPUREG_FIFO_R_W || registerAddr == MPUREG_MEM_R_W)
		return ML_ERROR_INVALID_PARAMETER;

	/* read in chunks, advancing the register address each time */
	while (done < length) {
		unsigned short chunk =
		    min(SERIAL_MAX_TRANSFER_SIZE, length - done);
		tMLError status =
		    sensor_i2c_read((struct i2c_adapter *) sl_handle,
				    slaveAddr, registerAddr + done,
				    chunk, &data[done]);
		if (status != ML_SUCCESS)
			return status;
		done += chunk;
	}
	return ML_SUCCESS;
}
/**
* @brief used to write multiple bytes of data to the memory.
* This should be sent by I2C.
*
* @param slaveAddr I2C slave address of device.
* @param memAddr The location in the memory to write to.
* @param length Length of burst data.
* @param data Pointer to block of data.
*
* @return Zero if successful; an error code otherwise
*/
tMLError MLSLSerialWriteMem(void *sl_handle,
unsigned char slaveAddr,
unsigned short memAddr,
unsigned short length,
unsigned char const *data)
{
tMLError result;
unsigned short bytesWritten = 0;
if ((memAddr & 0xFF) + length > MPU_MEM_BANK_SIZE) {
printk
("memory read length (%d B) extends beyond its limits (%d) "
"if started at location %d\n", length,
MPU_MEM_BANK_SIZE, memAddr & 0xFF);
return ML_ERROR_INVALID_PARAMETER;
}
while (bytesWritten < length) {
unsigned short thisLen =
min(SERIAL_MAX_TRANSFER_SIZE, length - bytesWritten);
result =
mpu_memory_write((struct i2c_adapter *) sl_handle,
slaveAddr, memAddr + bytesWritten,
thisLen, &data[bytesWritten]);
if (ML_SUCCESS != result)
return result;
bytesWritten += thisLen;
}
return ML_SUCCESS;
}
/**
* @brief used to read multiple bytes of data from the memory.
* This should be sent by I2C.
*
* @param slaveAddr I2C slave address of device.
* @param memAddr The location in the memory to read from.
* @param length Length of burst data.
* @param data Pointer to block of data.
*
* @return Zero if successful; an error code otherwise
*/
tMLError MLSLSerialReadMem(void *sl_handle,
unsigned char slaveAddr,
unsigned short memAddr,
unsigned short length, unsigned char *data)
{
tMLError result;
unsigned short bytesRead = 0;
if ((memAddr & 0xFF) + length > MPU_MEM_BANK_SIZE) {
printk
("memory read length (%d B) extends beyond its limits (%d) "
"if started at location %d\n", length,
MPU_MEM_BANK_SIZE, memAddr & 0xFF);
return ML_ERROR_INVALID_PARAMETER;
}
while (bytesRead < length) {
unsigned short thisLen =
min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead);
result =
mpu_memory_read((struct i2c_adapter *) sl_handle,
slaveAddr, memAddr + bytesRead,
thisLen, &data[bytesRead]);
if (ML_SUCCESS != result)
return result;
bytesRead += thisLen;
}
return ML_SUCCESS;
}
/**
* @brief used to write multiple bytes of data to the fifo.
* This should be sent by I2C.
*
* @param slaveAddr I2C slave address of device.
* @param length Length of burst of data.
* @param data Pointer to block of data.
*
* @return Zero if successful; an error code otherwise
*/
tMLError MLSLSerialWriteFifo(void *sl_handle,
			     unsigned char slaveAddr,
			     unsigned short length,
			     unsigned char const *data)
{
	/* scratch: FIFO register address byte + one payload chunk */
	unsigned char chunk[SERIAL_MAX_TRANSFER_SIZE + 1];
	unsigned short done = 0;

	if (length > FIFO_HW_SIZE) {
		printk(KERN_ERR
		       "maximum fifo write length is %d\n", FIFO_HW_SIZE);
		return ML_ERROR_INVALID_PARAMETER;
	}
	/* every chunk targets the same FIFO window register */
	while (done < length) {
		unsigned short n =
		    min(SERIAL_MAX_TRANSFER_SIZE, length - done);
		tMLError status;

		chunk[0] = MPUREG_FIFO_R_W;
		memcpy(&chunk[1], &data[done], n);
		status = sensor_i2c_write((struct i2c_adapter *) sl_handle,
					  slaveAddr, n + 1, chunk);
		if (status != ML_SUCCESS)
			return status;
		done += n;
	}
	return ML_SUCCESS;
}
/**
* @brief used to read multiple bytes of data from the fifo.
* This should be sent by I2C.
*
* @param slaveAddr I2C slave address of device.
* @param length Length of burst of data.
* @param data Pointer to block of data.
*
* @return Zero if successful; an error code otherwise
*/
tMLError MLSLSerialReadFifo(void *sl_handle,
			    unsigned char slaveAddr,
			    unsigned short length, unsigned char *data)
{
	unsigned short done = 0;

	if (length > FIFO_HW_SIZE) {
		printk(KERN_ERR
		       "maximum fifo read length is %d\n", FIFO_HW_SIZE);
		return ML_ERROR_INVALID_PARAMETER;
	}
	/* drain the FIFO window register in bus-sized chunks */
	while (done < length) {
		unsigned short n =
		    min(SERIAL_MAX_TRANSFER_SIZE, length - done);
		tMLError status =
		    sensor_i2c_read((struct i2c_adapter *) sl_handle,
				    slaveAddr, MPUREG_FIFO_R_W, n,
				    &data[done]);
		if (status != ML_SUCCESS)
			return status;
		done += n;
	}
	return ML_SUCCESS;
}
/**
* @}
*/
| gpl-2.0 |
mythos234/NamelessN910F-LL | arch/arm/mach-s3c64xx/mach-ncp.c | 2203 | 2530 | /*
* linux/arch/arm/mach-s3c64xx/mach-ncp.c
*
* Copyright (C) 2008-2009 Samsung Electronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <video/platform_lcd.h>
#include <video/samsung_fimd.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <plat/fb.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/samsung-time.h>
#include "common.h"
#define UCON S3C2410_UCON_DEFAULT
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
/* UART configuration: same line settings (8N1, FIFO) for ports 0-2. */
static struct s3c2410_uartcfg ncp_uartcfgs[] __initdata = {
	/* REVISIT: NCP uses only serial 1, 2 */
	[0] = {
		.hwport	     = 0,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	},
	[1] = {
		.hwport	     = 1,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	},
	[2] = {
		.hwport	     = 2,
		.flags	     = 0,
		.ucon	     = UCON,
		.ulcon	     = ULCON,
		.ufcon	     = UFCON,
	},
};
/* Platform devices registered for the NCP board. */
static struct platform_device *ncp_devices[] __initdata = {
	&s3c_device_hsmmc1,
	&s3c_device_i2c0,
};
/* No board-specific static I/O mappings are needed. */
static struct map_desc ncp_iodesc[] __initdata = {};
/*
 * Early setup: SoC I/O mappings, clocks from a 12 MHz crystal, UARTs,
 * and the PWM timers used as clocksource/clockevent.
 */
static void __init ncp_map_io(void)
{
	s3c64xx_init_io(ncp_iodesc, ARRAY_SIZE(ncp_iodesc));
	s3c24xx_init_clocks(12000000);
	s3c24xx_init_uarts(ncp_uartcfgs, ARRAY_SIZE(ncp_uartcfgs));
	samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
/* Register I2C0 (default platform data) and the board devices. */
static void __init ncp_machine_init(void)
{
	s3c_i2c0_set_platdata(NULL);

	platform_add_devices(ncp_devices, ARRAY_SIZE(ncp_devices));
}
/* Machine descriptor for the Samsung NCP board (S3C6410-based). */
MACHINE_START(NCP, "NCP")
	/* Maintainer: Samsung Electronics */
	.atag_offset	= 0x100,
	.init_irq	= s3c6410_init_irq,
	.map_io		= ncp_map_io,
	.init_machine	= ncp_machine_init,
	.init_late	= s3c64xx_init_late,
	.init_time	= samsung_timer_init,
	.restart	= s3c64xx_restart,
MACHINE_END
| gpl-2.0 |
evitareul/android_kernel_htc_evitareul | drivers/media/dvb/frontends/dib9000.c | 2203 | 71458 | /*
* Linux-DVB Driver for DiBcom's DiB9000 and demodulator-family.
*
* Copyright (C) 2005-10 DiBcom (http://www.dibcom.fr/)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include "dvb_math.h"
#include "dvb_frontend.h"
#include "dib9000.h"
#include "dibx000_common.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB9000: "); printk(args); printk("\n"); } } while (0)
#define MAX_NUMBER_OF_FRONTENDS 6
/* One I2C endpoint: adapter, 8-bit address, and per-device buffers. */
struct i2c_device {
	struct i2c_adapter *i2c_adap;
	u8 i2c_addr;

	u8 *i2c_read_buffer;
	u8 *i2c_write_buffer;
};
/* lock */
#define DIB_LOCK struct mutex
#define DibAcquireLock(lock) do { if (mutex_lock_interruptible(lock) < 0) dprintk("could not get the lock"); } while (0)
#define DibReleaseLock(lock) mutex_unlock(lock)
#define DibInitLock(lock) mutex_init(lock)
#define DibFreeLock(lock)
/* One queued PID-filter command (applied when the firmware is ready). */
struct dib9000_pid_ctrl {
#define DIB9000_PID_FILTER_CTRL 0
#define DIB9000_PID_FILTER 1
	u8 cmd;		/* DIB9000_PID_FILTER_CTRL or DIB9000_PID_FILTER */
	u8 id;		/* filter slot */
	u16 pid;	/* PID value (filter command only) */
	u8 onoff;	/* enable/disable flag */
};
/* Per-demodulator driver state for the DiB9000. */
struct dib9000_state {
	struct i2c_device i2c;

	struct dibx000_i2c_master i2c_master;
	struct i2c_adapter tuner_adap;
	struct i2c_adapter component_bus;

	u16 revision;
	u8 reg_offs;

	enum frontend_tune_state tune_state;
	u32 status;
	struct dvb_frontend_parametersContext channel_status;

	u8 fe_id;

#define DIB9000_GPIO_DEFAULT_DIRECTIONS 0xffff
	u16 gpio_dir;
#define DIB9000_GPIO_DEFAULT_VALUES     0x0000
	u16 gpio_val;
#define DIB9000_GPIO_DEFAULT_PWM_POS    0xffff
	u16 gpio_pwm_pos;

	union {			/* common for all chips */
		struct {
			u8 mobile_mode:1;
		} host;

		struct {
			/* firmware-side memory map, indexed by FE_MM_* */
			struct dib9000_fe_memory_map {
				u16 addr;
				u16 size;
			} fe_mm[18];
			u8 memcmd;

			DIB_LOCK mbx_if_lock;	/* to protect read/write operations */
			DIB_LOCK mbx_lock;	/* to protect the whole mailbox handling */

			DIB_LOCK mem_lock;	/* to protect the memory accesses */
			DIB_LOCK mem_mbx_lock;	/* to protect the memory-based mailbox */
#define MBX_MAX_WORDS (256 - 200 - 2)
#define DIB9000_MSG_CACHE_SIZE 2
			u16 message_cache[DIB9000_MSG_CACHE_SIZE][MBX_MAX_WORDS];
			u8 fw_is_running;
		} risc;
	} platform;

	union {			/* common for all platforms */
		struct {
			struct dib9000_config cfg;
		} d9;
	} chip;

	struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS];
	u16 component_bus_speed;

	/* for the I2C transfer */
	struct i2c_msg msg[2];
	u8 i2c_write_buffer[255];
	u8 i2c_read_buffer[255];
	DIB_LOCK demod_lock;
	u8 get_frontend_internal;
	struct dib9000_pid_ctrl pid_ctrl[10];
	s8 pid_ctrl_index; /* -1: empty list; -2: do not use the list */
};
/* Zero-filled frontend-info block uploaded to the firmware. */
static const u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0
};
/* Power-gating configurations selectable on the DiB9000. */
enum dib9000_power_mode {
	DIB9000_POWER_ALL = 0,
	DIB9000_POWER_NO,
	DIB9000_POWER_INTERF_ANALOG_AGC,
	DIB9000_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD,
	DIB9000_POWER_COR4_CRY_ESRAM_MOUT_NUD,
	DIB9000_POWER_INTERFACE_ONLY,
};
/* Mailbox message IDs sent from the host to the on-chip RISC firmware. */
enum dib9000_out_messages {
	OUT_MSG_HBM_ACK,
	OUT_MSG_HOST_BUF_FAIL,
	OUT_MSG_REQ_VERSION,
	OUT_MSG_BRIDGE_I2C_W,
	OUT_MSG_BRIDGE_I2C_R,
	OUT_MSG_BRIDGE_APB_W,
	OUT_MSG_BRIDGE_APB_R,
	OUT_MSG_SCAN_CHANNEL,
	OUT_MSG_MONIT_DEMOD,
	OUT_MSG_CONF_GPIO,
	OUT_MSG_DEBUG_HELP,
	OUT_MSG_SUBBAND_SEL,
	OUT_MSG_ENABLE_TIME_SLICE,
	OUT_MSG_FE_FW_DL,
	OUT_MSG_FE_CHANNEL_SEARCH,
	OUT_MSG_FE_CHANNEL_TUNE,
	OUT_MSG_FE_SLEEP,
	OUT_MSG_FE_SYNC,
	OUT_MSG_CTL_MONIT,

	OUT_MSG_CONF_SVC,
	OUT_MSG_SET_HBM,
	OUT_MSG_INIT_DEMOD,
	OUT_MSG_ENABLE_DIVERSITY,
	OUT_MSG_SET_OUTPUT_MODE,
	OUT_MSG_SET_PRIORITARY_CHANNEL,
	OUT_MSG_ACK_FRG,
	OUT_MSG_INIT_PMU,
};
/* Mailbox message IDs received by the host from the RISC firmware. */
enum dib9000_in_messages {
	IN_MSG_DATA,
	IN_MSG_FRAME_INFO,
	IN_MSG_CTL_MONIT,
	IN_MSG_ACK_FREE_ITEM,
	IN_MSG_DEBUG_BUF,
	IN_MSG_MPE_MONITOR,
	IN_MSG_RAWTS_MONITOR,
	IN_MSG_END_BRIDGE_I2C_RW,
	IN_MSG_END_BRIDGE_APB_RW,
	IN_MSG_VERSION,
	IN_MSG_END_OF_SCAN,
	IN_MSG_MONIT_DEMOD,
	IN_MSG_ERROR,
	IN_MSG_FE_FW_DL_DONE,
	IN_MSG_EVENT,
	IN_MSG_ACK_CHANGE_SVC,
	IN_MSG_HBM_PROF,
};
/* memory_access requests */
#define FE_MM_W_CHANNEL 0
#define FE_MM_W_FE_INFO 1
#define FE_MM_RW_SYNC 2
#define FE_SYNC_CHANNEL 1
#define FE_SYNC_W_GENERIC_MONIT 2
#define FE_SYNC_COMPONENT_ACCESS 3
#define FE_MM_R_CHANNEL_SEARCH_STATE 3
#define FE_MM_R_CHANNEL_UNION_CONTEXT 4
#define FE_MM_R_FE_INFO 5
#define FE_MM_R_FE_MONITOR 6
#define FE_MM_W_CHANNEL_HEAD 7
#define FE_MM_W_CHANNEL_UNION 8
#define FE_MM_W_CHANNEL_CONTEXT 9
#define FE_MM_R_CHANNEL_UNION 10
#define FE_MM_R_CHANNEL_CONTEXT 11
#define FE_MM_R_CHANNEL_TUNE_STATE 12
#define FE_MM_R_GENERIC_MONITORING_SIZE 13
#define FE_MM_W_GENERIC_MONITORING 14
#define FE_MM_R_GENERIC_MONITORING 15
#define FE_MM_W_COMPONENT_ACCESS 16
#define FE_MM_RW_COMPONENT_ACCESS_BUFFER 17
static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address, u16 attribute, const u8 * tx, u32 txlen, u8 * b, u32 len);
static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 address, u16 attribute, const u8 * b, u32 len);
/*
 * Translate a dibx000 OUTMODE_* constant into the bit value the
 * DiB9000 firmware expects; unknown modes map to 0 (high impedance).
 */
static u16 to_fw_output_mode(u16 mode)
{
	switch (mode) {
	case OUTMODE_ANALOG_ADC:
		return 1;
	case OUTMODE_MPEG2_FIFO:
		return 2;
	case OUTMODE_MPEG2_PAR_GATED_CLK:
		return 4;
	case OUTMODE_MPEG2_PAR_CONT_CLK:
		return 8;
	case OUTMODE_MPEG2_SERIAL:
		return 16;
	case OUTMODE_DIVERSITY:
		return 128;
	case OUTMODE_HIGH_Z:
	default:
		return 0;
	}
}
/*
 * Read @len bytes starting at register @reg into @b, honouring the
 * DATA_BUS_ACCESS_MODE_* bits in @attribute.  While the firmware is
 * running, low registers (< 1024) are routed through the APB bridge.
 * NOTE(review): the return type is u16 but error paths return
 * -EREMOTEIO; callers must treat any non-zero value as failure.
 */
static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32 len, u16 attribute)
{
	u32 chunk_size = 126;	/* max payload per I2C transfer */
	u32 l;
	int ret;

	if (state->platform.risc.fw_is_running && (reg < 1024))
		return dib9000_risc_apb_access_read(state, reg, attribute, NULL, 0, b, len);

	/* msg[0]: 2-byte register address write, msg[1]: data read */
	memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
	state->msg[0].addr = state->i2c.i2c_addr >> 1;
	state->msg[0].flags = 0;
	state->msg[0].buf = state->i2c_write_buffer;
	state->msg[0].len = 2;
	state->msg[1].addr = state->i2c.i2c_addr >> 1;
	state->msg[1].flags = I2C_M_RD;
	state->msg[1].buf = b;
	state->msg[1].len = len;

	state->i2c_write_buffer[0] = reg >> 8;
	state->i2c_write_buffer[1] = reg & 0xff;

	/* access-mode flags are encoded in the top bits of the address */
	if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
		state->i2c_write_buffer[0] |= (1 << 5);

	if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
		state->i2c_write_buffer[0] |= (1 << 4);

	do {
		l = len < chunk_size ? len : chunk_size;
		state->msg[1].len = l;
		state->msg[1].buf = b;
		ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0;
		if (ret != 0) {
			dprintk("i2c read error on %d", reg);
			return -EREMOTEIO;
		}

		b += l;
		len -= l;

		/* advance the word-addressed register between chunks */
		if (!(attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT))
			reg += l / 2;
	} while ((ret == 0) && len);

	return 0;
}
/*
 * Read one 16-bit big-endian register directly over I2C (no firmware
 * bridge).  Returns 0 on transfer failure, which is indistinguishable
 * from a register that actually reads 0.
 */
static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg)
{
	struct i2c_msg msg[2] = {
		{.addr = i2c->i2c_addr >> 1, .flags = 0,
			.buf = i2c->i2c_write_buffer, .len = 2},
		{.addr = i2c->i2c_addr >> 1, .flags = I2C_M_RD,
			.buf = i2c->i2c_read_buffer, .len = 2},
	};

	i2c->i2c_write_buffer[0] = reg >> 8;
	i2c->i2c_write_buffer[1] = reg & 0xff;

	if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) {
		dprintk("read register %x error", reg);
		return 0;
	}

	return (i2c->i2c_read_buffer[0] << 8) | i2c->i2c_read_buffer[1];
}
/* Read one big-endian 16-bit word; returns 0 on transfer failure. */
static inline u16 dib9000_read_word(struct dib9000_state *state, u16 reg)
{
	if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2, 0) != 0)
		return 0;
	return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
}
/* Same as dib9000_read_word() but with explicit access-mode attributes. */
static inline u16 dib9000_read_word_attr(struct dib9000_state *state, u16 reg, u16 attribute)
{
	if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2,
				attribute) != 0)
		return 0;
	return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
}
#define dib9000_read16_noinc_attr(state, reg, b, len, attribute) dib9000_read16_attr(state, reg, b, len, (attribute) | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
/*
 * Write @len bytes from @buf starting at register @reg, honouring the
 * DATA_BUS_ACCESS_MODE_* bits in @attribute.  While the firmware is
 * running, low registers (< 1024) are routed through the APB bridge.
 * NOTE(review): return type is u16 but failures yield negative error
 * codes; callers must treat non-zero as failure.
 */
static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 * buf, u32 len, u16 attribute)
{
	u32 chunk_size = 126;	/* max payload per I2C transfer */
	u32 l;
	int ret;

	if (state->platform.risc.fw_is_running && (reg < 1024)) {
		if (dib9000_risc_apb_access_write
		    (state, reg, DATA_BUS_ACCESS_MODE_16BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | attribute, buf, len) != 0)
			return -EINVAL;
		return 0;
	}

	memset(&state->msg[0], 0, sizeof(struct i2c_msg));
	state->msg[0].addr = state->i2c.i2c_addr >> 1;
	state->msg[0].flags = 0;
	state->msg[0].buf = state->i2c_write_buffer;
	state->msg[0].len = len + 2;

	state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
	state->i2c_write_buffer[1] = (reg) & 0xff;

	/* access-mode flags are encoded in the top bits of the address */
	if (attribute & DATA_BUS_ACCESS_MODE_8BIT)
		state->i2c_write_buffer[0] |= (1 << 5);

	if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
		state->i2c_write_buffer[0] |= (1 << 4);

	do {
		l = len < chunk_size ? len : chunk_size;
		state->msg[0].len = l + 2;
		memcpy(&state->i2c_write_buffer[2], buf, l);

		ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0;

		buf += l;
		len -= l;

		/* advance the word-addressed register between chunks */
		if (!(attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT))
			reg += l / 2;
	} while ((ret == 0) && len);

	return ret;
}
/* Write one 16-bit big-endian register directly over I2C. */
static int dib9000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
{
	struct i2c_msg msg = {
		.addr = i2c->i2c_addr >> 1, .flags = 0,
		.buf = i2c->i2c_write_buffer, .len = 4
	};

	i2c->i2c_write_buffer[0] = (reg >> 8) & 0xff;
	i2c->i2c_write_buffer[1] = reg & 0xff;
	i2c->i2c_write_buffer[2] = (val >> 8) & 0xff;
	i2c->i2c_write_buffer[3] = val & 0xff;

	return i2c_transfer(i2c->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
}
/* Write one 16-bit word, big-endian on the wire. */
static inline int dib9000_write_word(struct dib9000_state *state, u16 reg, u16 val)
{
	u8 b[2] = { val >> 8, val & 0xff };
	return dib9000_write16_attr(state, reg, b, 2, 0);
}
/* Same as dib9000_write_word() but with explicit access-mode attributes. */
static inline int dib9000_write_word_attr(struct dib9000_state *state, u16 reg, u16 val, u16 attribute)
{
	u8 b[2] = { val >> 8, val & 0xff };
	return dib9000_write16_attr(state, reg, b, 2, attribute);
}
/* Convenience wrappers around the attribute-taking register accessors. */
#define dib9000_write(state, reg, buf, len) dib9000_write16_attr(state, reg, buf, len, 0)
#define dib9000_write16_noinc(state, reg, buf, len) dib9000_write16_attr(state, reg, buf, len, DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
#define dib9000_write16_noinc_attr(state, reg, buf, len, attribute) dib9000_write16_attr(state, reg, buf, len, DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | (attribute))
#define dib9000_mbx_send(state, id, data, len) dib9000_mbx_send_attr(state, id, data, len, 0)
#define dib9000_mbx_get_message(state, id, msg, len) dib9000_mbx_get_message_attr(state, id, msg, len, 0)
#define MAC_IRQ (1 << 1)
#define IRQ_POL_MSK (1 << 4)
/* RISC shared-memory window: byte-wise FIFO access through register 1063. */
#define dib9000_risc_mem_read_chunks(state, b, len) dib9000_read16_attr(state, 1063, b, len, DATA_BUS_ACCESS_MODE_8BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
#define dib9000_risc_mem_write_chunks(state, buf, len) dib9000_write16_attr(state, 1063, buf, len, DATA_BUS_ACCESS_MODE_8BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
/*
 * Program the RISC memory controller with a window [addr, addr+len) and
 * the current pointer, then (for reads) trigger the prefetch.
 */
static void dib9000_risc_mem_setup_cmd(struct dib9000_state *state, u32 addr, u32 len, u8 reading)
{
        u8 cfg[14] = { 0 };
        u32 end = addr + len;

        /* dprintk("%d memcmd: %d %d %d\n", state->fe_id, addr, addr+len, len); */
        cfg[1] = 1;
        /* start address (bytes 4..5) and current pointer (bytes 12..13) */
        cfg[4] = cfg[12] = (u8) (addr >> 8);
        cfg[5] = cfg[13] = (u8) (addr & 0xff);
        /* end address (bytes 8..9) */
        cfg[8] = (u8) (end >> 8);
        cfg[9] = (u8) (end & 0xff);

        dib9000_write(state, 1056, cfg, 14);
        if (reading)
                dib9000_write_word(state, 1056, (1 << 15) | 1);

        /* if it was called directly reset it - to force a future setup-call to set it */
        state->platform.risc.memcmd = -1;
}
/*
 * Configure the memory controller for memory-map slot `cmd` (bit 7 set =
 * read access). The reconfiguration is skipped when the controller already
 * holds this command - except for short reads, see below.
 */
static void dib9000_risc_mem_setup(struct dib9000_state *state, u8 cmd)
{
        struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[cmd & 0x7f];
        /* decide whether we need to "refresh" the memory controller */
        if (state->platform.risc.memcmd == cmd &&       /* same command */
            !(cmd & 0x80 && m->size < 67))      /* and we do not want to read something with less than 67 bytes looping - working around a bug in the memory controller */
                return;
        dib9000_risc_mem_setup_cmd(state, m->addr, m->size, cmd & 0x80);
        state->platform.risc.memcmd = cmd;
}
/*
 * Read `len` bytes from the RISC memory-map slot `cmd` into `b`.
 * Serialized by mem_lock. Returns 0 on success, -EIO when the firmware
 * is not running.
 */
static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u16 len)
{
        if (!state->platform.risc.fw_is_running)
                return -EIO;
        DibAcquireLock(&state->platform.risc.mem_lock);
        dib9000_risc_mem_setup(state, cmd | 0x80);      /* bit 7 = read access */
        dib9000_risc_mem_read_chunks(state, b, len);
        DibReleaseLock(&state->platform.risc.mem_lock);
        return 0;
}
/*
 * Write the full slot size of memory-map entry `cmd` from `b` into RISC
 * memory. Serialized by mem_lock. Returns 0 on success, -EIO when the
 * firmware is not running.
 */
static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8 * b)
{
        struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[cmd];
        if (!state->platform.risc.fw_is_running)
                return -EIO;
        DibAcquireLock(&state->platform.risc.mem_lock);
        dib9000_risc_mem_setup(state, cmd);
        dib9000_risc_mem_write_chunks(state, b, m->size);
        DibReleaseLock(&state->platform.risc.mem_lock);
        return 0;
}
/*
 * Download `len` bytes of microcode for RISC A (risc_id 0) or RISC B
 * (risc_id 1), unlocking the download port with `key` first.
 * Returns 0 on success, -EIO on I2C failure.
 */
static int dib9000_firmware_download(struct dib9000_state *state, u8 risc_id, u16 key, const u8 * code, u32 len)
{
        u16 offs = (risc_id == 1) ? 16 : 0;     /* RISC B register bank offset */

        /* config crtl reg */
        dib9000_write_word(state, 1024 + offs, 0x000f);
        dib9000_write_word(state, 1025 + offs, 0);
        dib9000_write_word(state, 1031 + offs, key);

        dprintk("going to download %dB of microcode", len);
        if (dib9000_write16_noinc(state, 1026 + offs, (u8 *) code, (u16) len) != 0) {
                dprintk("error while downloading microcode for RISC %c", 'A' + risc_id);
                return -EIO;
        }

        dprintk("Microcode for RISC %c loaded", 'A' + risc_id);
        return 0;
}
/*
 * Reset the host<->RISC mailbox and wait for the RISC to acknowledge by
 * clearing the reset bit. Returns 0 on success, -EIO on timeout.
 *
 * Fix: the original slept 100 ms unconditionally after every poll, so even
 * an immediately-successful reset cost a full sleep; poll first, sleep only
 * while the bit is still set.
 */
static int dib9000_mbx_host_init(struct dib9000_state *state, u8 risc_id)
{
        u16 mbox_offs = (risc_id == 1) ? 16 : 0;
        u16 reset_reg;
        u16 tries = 1000;

        /* Reset mailbox */
        dib9000_write_word(state, 1027 + mbox_offs, 0x8000);

        /* Poll the reset status; sleep only while the bit is still set */
        do {
                reset_reg = dib9000_read_word(state, 1027 + mbox_offs);
                if (!(reset_reg & 0x8000))
                        break;
                msleep(100);
        } while (--tries);

        if (reset_reg & 0x8000) {
                dprintk("MBX: init ERROR, no response from RISC %c", 'A' + risc_id);
                return -EIO;
        }
        dprintk("MBX: initialized");
        return 0;
}
#define MAX_MAILBOX_TRY 100
/*
 * Send a message (`id` + `len` 16-bit payload words) to the RISC mailbox.
 * Waits up to MAX_MAILBOX_TRY * 100 ms for mailbox space.
 *
 * WARNING: the payload in `data` is byte-swapped to big-endian IN PLACE -
 * the caller's buffer is modified by this call.
 *
 * Returns 0 on success, negative value on failure.
 */
static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data, u8 len, u16 attr)
{
        u8 *d, b[2];
        u16 tmp;
        u16 size;
        u32 i;
        int ret = 0;
        if (!state->platform.risc.fw_is_running)
                return -EINVAL;
        DibAcquireLock(&state->platform.risc.mbx_if_lock);
        /* wait until the RISC mailbox has room for the whole message */
        tmp = MAX_MAILBOX_TRY;
        do {
                size = dib9000_read_word_attr(state, 1043, attr) & 0xff;
                if ((size + len + 1) > MBX_MAX_WORDS && --tmp) {
                        dprintk("MBX: RISC mbx full, retrying");
                        msleep(100);
                } else
                        break;
        } while (1);
        /*dprintk( "MBX: size: %d", size); */
        if (tmp == 0) {         /* retries exhausted while the mailbox stayed full */
                ret = -EINVAL;
                goto out;
        }
#ifdef DUMP_MSG
        dprintk("--> %02x %d ", id, len + 1);
        for (i = 0; i < len; i++)
                dprintk("%04x ", data[i]);
        dprintk("\n");
#endif
        /* byte-order conversion - works on big (where it is not necessary) or little endian */
        d = (u8 *) data;
        for (i = 0; i < len; i++) {
                tmp = data[i];
                *d++ = tmp >> 8;
                *d++ = tmp & 0xff;
        }
        /* write msg */
        b[0] = id;
        b[1] = len + 1;
        if (dib9000_write16_noinc_attr(state, 1045, b, 2, attr) != 0 || dib9000_write16_noinc_attr(state, 1045, (u8 *) data, len * 2, attr) != 0) {
                ret = -EIO;
                goto out;
        }
        /* update register nb_mes_in_RX */
        ret = (u8) dib9000_write_word_attr(state, 1043, 1 << 14, attr);
out:
        DibReleaseLock(&state->platform.risc.mbx_if_lock);
        return ret;
}
/*
 * Read one message from the RISC mailbox into `data` (16-bit words,
 * converted to host order). Messages larger than MBX_MAX_WORDS are drained
 * and discarded. Returns the total message length in words (including the
 * header word), or 0 when the firmware is not running.
 */
static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id, u16 attr)
{
#ifdef DUMP_MSG
        u16 *d = data;
#endif
        u16 tmp, i;
        u8 size;
        u8 mc_base;
        if (!state->platform.risc.fw_is_running)
                return 0;
        DibAcquireLock(&state->platform.risc.mbx_if_lock);
        if (risc_id == 1)
                mc_base = 16;
        else
                mc_base = 0;
        /* Length and type in the first word */
        *data = dib9000_read_word_attr(state, 1029 + mc_base, attr);
        size = *data & 0xff;
        if (size <= MBX_MAX_WORDS) {
                data++;
                size--;         /* Initial word already read */
                dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, size * 2, attr);
                /* to word conversion */
                for (i = 0; i < size; i++) {
                        tmp = *data;
                        *data = (tmp >> 8) | (tmp << 8);
                        data++;
                }
#ifdef DUMP_MSG
                dprintk("<-- ");
                for (i = 0; i < size + 1; i++)
                        dprintk("%04x ", d[i]);
                dprintk("\n");
#endif
        } else {
                dprintk("MBX: message is too big for message cache (%d), flushing message", size);
                size--;         /* Initial word already read */
                while (size--)
                        dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, 2, attr);
        }
        /* Update register nb_mes_in_TX */
        dib9000_write_word_attr(state, 1028 + mc_base, 1 << 14, attr);
        DibReleaseLock(&state->platform.risc.mbx_if_lock);
        return size + 1;
}
/*
 * Print a debug message received from the RISC firmware. data[0..1] carry a
 * 32-bit timestamp, the rest is text; `size` is the message length in words
 * (assumed >= 2 - TODO confirm callers guarantee this).
 *
 * Fixes: the firmware-supplied text was passed to dprintk() as the format
 * string (format-string hazard if it contains '%'); also corrects the
 * "<emtpy>" typo in the log output.
 */
static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 size)
{
        u32 ts = data[1] << 16 | data[0];
        char *b = (char *)&data[2];
        b[2 * (size - 2) - 1] = '\0';   /* Bullet proof the buffer */
        if (*b == '~') {
                b++;
                dprintk("%s", b);       /* never use received text as the format string */
        } else
                dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<empty>");
        return 1;
}
/*
 * Pull one pending RISC message into a free slot of the message cache.
 * Debug messages are printed immediately and their slot freed again.
 * Returns 1 when a message was fetched, -1 when the cache is full.
 */
static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
{
        int i;
        u8 size;
        u16 *block;
        /* find a free slot */
        for (i = 0; i < DIB9000_MSG_CACHE_SIZE; i++) {
                block = state->platform.risc.message_cache[i];
                if (*block == 0) {      /* first word 0 marks a free slot */
                        size = dib9000_mbx_read(state, block, 1, attr);
                        /* dprintk( "MBX: fetched %04x message to cache", *block); */
                        switch (*block >> 8) {  /* message id lives in the high byte */
                        case IN_MSG_DEBUG_BUF:
                                dib9000_risc_debug_buf(state, block + 1, size); /* debug-messages are going to be printed right away */
                                *block = 0;     /* free the block */
                                break;
#if 0
                        case IN_MSG_DATA:       /* FE-TRACE */
                                dib9000_risc_data_process(state, block + 1, size);
                                *block = 0;
                                break;
#endif
                        default:
                                break;
                        }
                        return 1;
                }
        }
        dprintk("MBX: no free cache-slot found for new message...");
        return -1;
}
/*
 * Return the number of messages pending in the mailbox of the given RISC
 * (0 = RiscA: 5-bit counter in reg 1028; 1 = RiscB: 7-bit counter in 1044).
 */
static u8 dib9000_mbx_count(struct dib9000_state *state, u8 risc_id, u16 attr)
{
        u16 reg, shift, mask;

        if (risc_id == 0) {
                reg = 1028;     /* 5 bit field */
                shift = 10;
                mask = 0x1f;
        } else {
                reg = 1044;     /* 7 bit field */
                shift = 8;
                mask = 0x7f;
        }
        return (u8) ((dib9000_read_word_attr(state, reg, attr) >> shift) & mask);
}
/*
 * Service the RiscB mailbox: fetch one pending message into the cache (if
 * any) and clear the mailbox IRQ. Returns the fetch result, 0 when nothing
 * was pending, or -1 when the firmware is not running.
 */
static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
{
        int ret = 0;
        u16 tmp;
        if (!state->platform.risc.fw_is_running)
                return -1;
        DibAcquireLock(&state->platform.risc.mbx_lock);
        if (dib9000_mbx_count(state, 1, attr))  /* 1=RiscB */
                ret = dib9000_mbx_fetch_to_cache(state, attr);
        tmp = dib9000_read_word_attr(state, 1229, attr);        /* Clear the IRQ */
/*      if (tmp) */
/*              dprintk( "cleared IRQ: %x", tmp); */
        DibReleaseLock(&state->platform.risc.mbx_lock);
        return ret;
}
/*
 * Wait (up to 30 polls) for a message with the given `id` to appear in the
 * message cache, copy its payload into `msg` and its word count into `size`.
 * Returns 1 when the message was found, -1 on timeout or mailbox error.
 */
static int dib9000_mbx_get_message_attr(struct dib9000_state *state, u16 id, u16 * msg, u8 * size, u16 attr)
{
        u8 i;
        u16 *block;
        u16 timeout = 30;
        *msg = 0;
        do {
                /* dib9000_mbx_get_from_cache(); */
                for (i = 0; i < DIB9000_MSG_CACHE_SIZE; i++) {
                        block = state->platform.risc.message_cache[i];
                        if ((*block >> 8) == id) {
                                *size = (*block & 0xff) - 1;
                                memcpy(msg, block + 1, (*size) * 2);
                                *block = 0;     /* free the block */
                                i = 0;  /* signal that we found a message */
                                break;
                        }
                }
                if (i == 0)     /* i == 0 is the "found" sentinel set above */
                        break;
                if (dib9000_mbx_process(state, attr) == -1)     /* try to fetch one message - if any */
                        return -1;
        } while (--timeout);
        if (timeout == 0) {
                dprintk("waiting for message %d timed out", id);
                return -1;
        }
        return i == 0;
}
/*
 * Ask the firmware for its version and verify it is a 7.x revision this
 * driver knows how to talk to. Returns 0 on a supported version, -EIO on
 * mailbox failure, -EINVAL on an unsupported version.
 */
static int dib9000_risc_check_version(struct dib9000_state *state)
{
        u8 r[4];
        u8 size;
        u16 fw_version = 0;
        u16 minor;

        if (dib9000_mbx_send(state, OUT_MSG_REQ_VERSION, &fw_version, 1) != 0)
                return -EIO;
        if (dib9000_mbx_get_message(state, IN_MSG_VERSION, (u16 *) r, &size) < 0)
                return -EIO;

        fw_version = (r[0] << 8) | r[1];
        dprintk("RISC: ver: %d.%02d (IC: %d)", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);

        if ((fw_version >> 10) != 7)
                return -EINVAL;

        /* only revisions 7.11, 7.12 and 7.14 - 7.17 are supported */
        minor = fw_version & 0x3ff;
        if (minor != 11 && minor != 12 && (minor < 14 || minor > 17)) {
                dprintk("RISC: invalid firmware version");
                return -EINVAL;
        }

        dprintk("RISC: valid firmware version");
        return 0;
}
/*
 * Boot the on-chip RISC processors: configure the RAM pools and bus
 * mastering, hold the RISCs in reset, download the supplied microcode
 * images (either may be NULL), release the RISCs, initialize their
 * mailboxes and verify the reported firmware version.
 * Returns 0 on success, -EIO / -EINVAL on failure.
 */
static int dib9000_fw_boot(struct dib9000_state *state, const u8 * codeA, u32 lenA, const u8 * codeB, u32 lenB)
{
        /* Reconfig pool mac ram */
        dib9000_write_word(state, 1225, 0x02);  /* A: 8k C, 4 k D - B: 32k C 6 k D - IRAM 96k */
        dib9000_write_word(state, 1226, 0x05);
        /* Toggles IP crypto to Host APB interface. */
        dib9000_write_word(state, 1542, 1);
        /* Set jump and no jump in the dma box */
        dib9000_write_word(state, 1074, 0);
        dib9000_write_word(state, 1075, 0);
        /* Set MAC as APB Master. */
        dib9000_write_word(state, 1237, 0);
        /* Reset the RISCs */
        if (codeA != NULL)
                dib9000_write_word(state, 1024, 2);
        else
                dib9000_write_word(state, 1024, 15);
        if (codeB != NULL)
                dib9000_write_word(state, 1040, 2);
        if (codeA != NULL)
                dib9000_firmware_download(state, 0, 0x1234, codeA, lenA);
        if (codeB != NULL)
                dib9000_firmware_download(state, 1, 0x1234, codeB, lenB);
        /* Run the RISCs */
        if (codeA != NULL)
                dib9000_write_word(state, 1024, 0);
        if (codeB != NULL)
                dib9000_write_word(state, 1040, 0);
        if (codeA != NULL)
                if (dib9000_mbx_host_init(state, 0) != 0)
                        return -EIO;
        if (codeB != NULL)
                if (dib9000_mbx_host_init(state, 1) != 0)
                        return -EIO;
        msleep(100);    /* give the firmware time to come up before querying it */
        state->platform.risc.fw_is_running = 1;
        if (dib9000_risc_check_version(state) != 0)
                return -EINVAL;
        state->platform.risc.memcmd = 0xff;     /* invalidate the memory-controller cache */
        return 0;
}
/*
 * Probe the chip over raw I2C: check the DiBcom vendor ID, then the device
 * ID, and reject the unsupported DiB7000PC. Returns the device ID on
 * success, 0 when no supported chip was found.
 */
static u16 dib9000_identify(struct i2c_device *client)
{
        u16 value;

        value = dib9000_i2c_read16(client, 896);
        if (value != 0x01b3) {
                dprintk("wrong Vendor ID (0x%x)", value);
                return 0;
        }

        value = dib9000_i2c_read16(client, 897);
        /* supported device IDs form the contiguous range 0x4000..0x4005 */
        if (value < 0x4000 || value > 0x4005) {
                dprintk("wrong Device ID (0x%x)", value);
                return 0;
        }

        /* protect this driver to be used with 7000PC */
        if (value == 0x4000 && dib9000_i2c_read16(client, 769) == 0x4000) {
                dprintk("this driver does not work with DiB7000PC");
                return 0;
        }

        switch (value) {
        case 0x4000:
                dprintk("found DiB7000MA/PA/MB/PB");
                break;
        case 0x4001:
                dprintk("found DiB7000HC");
                break;
        case 0x4002:
                dprintk("found DiB7000MC");
                break;
        case 0x4003:
                dprintk("found DiB9000A");
                break;
        case 0x4004:
                dprintk("found DiB9000H");
                break;
        case 0x4005:
                dprintk("found DiB9000M");
                break;
        }

        return value;
}
/*
 * Program the power-control registers (903..906, +1 on DiB9000 revisions)
 * for the requested power mode. Bits are active-high power-DOWN masks, so
 * the defaults below power everything off and each mode clears the bits of
 * the blocks it needs.
 */
static void dib9000_set_power_mode(struct dib9000_state *state, enum dib9000_power_mode mode)
{
        /* by default everything is going to be powered off */
        u16 reg_903 = 0x3fff, reg_904 = 0xffff, reg_905 = 0xffff, reg_906;
        u8 offset;
        if (state->revision == 0x4003 || state->revision == 0x4004 || state->revision == 0x4005)
                offset = 1;     /* DiB9000 parts have the bank shifted by one register */
        else
                offset = 0;
        reg_906 = dib9000_read_word(state, 906 + offset) | 0x3; /* keep settings for RISC */
        /* now, depending on the requested mode, we power on */
        switch (mode) {
                /* power up everything in the demod */
        case DIB9000_POWER_ALL:
                reg_903 = 0x0000;
                reg_904 = 0x0000;
                reg_905 = 0x0000;
                reg_906 = 0x0000;
                break;
                /* just leave power on the control-interfaces: GPIO and (I2C or SDIO or SRAM) */
        case DIB9000_POWER_INTERFACE_ONLY:      /* TODO power up either SDIO or I2C or SRAM */
                reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 2));
                break;
        case DIB9000_POWER_INTERF_ANALOG_AGC:
                reg_903 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10));
                reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 4) | (1 << 2));
                reg_906 &= ~((1 << 0));
                break;
        case DIB9000_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD:
                reg_903 = 0x0000;
                reg_904 = 0x801f;
                reg_905 = 0x0000;
                reg_906 &= ~((1 << 0));
                break;
        case DIB9000_POWER_COR4_CRY_ESRAM_MOUT_NUD:
                reg_903 = 0x0000;
                reg_904 = 0x8000;
                reg_905 = 0x010b;
                reg_906 &= ~((1 << 0));
                break;
        default:
        case DIB9000_POWER_NO:
                break;
        }
        /* always power down unused parts */
        if (!state->platform.host.mobile_mode)
                reg_904 |= (1 << 7) | (1 << 6) | (1 << 4) | (1 << 2) | (1 << 1);
        /* P_sdio_select_clk = 0 on MC and after */
        if (state->revision != 0x4000)
                reg_906 <<= 1;
        dib9000_write_word(state, 903 + offset, reg_903);
        dib9000_write_word(state, 904 + offset, reg_904);
        dib9000_write_word(state, 905 + offset, reg_905);
        dib9000_write_word(state, 906 + offset, reg_906);
}
/*
 * Hard-reset the demodulator: pulse the reset register, re-identify the
 * chip (only DiB9000 A/H/M are accepted here), reset the I2C master,
 * cycle power and restart all blocks, then drop back to interface-only
 * power. Returns 0 on success, -EINVAL for an unsupported chip.
 */
static int dib9000_fw_reset(struct dvb_frontend *fe)
{
        struct dib9000_state *state = fe->demodulator_priv;
        dib9000_write_word(state, 1817, 0x0003);
        dib9000_write_word(state, 1227, 1);     /* pulse reset */
        dib9000_write_word(state, 1227, 0);
        switch ((state->revision = dib9000_identify(&state->i2c))) {
        case 0x4003:
        case 0x4004:
        case 0x4005:
                state->reg_offs = 1;
                break;
        default:
                return -EINVAL;
        }
        /* reset the i2c-master to use the host interface */
        dibx000_reset_i2c_master(&state->i2c_master);
        dib9000_set_power_mode(state, DIB9000_POWER_ALL);
        /* unforce divstr regardless whether i2c enumeration was done or not */
        dib9000_write_word(state, 1794, dib9000_read_word(state, 1794) & ~(1 << 1));
        dib9000_write_word(state, 1796, 0);
        dib9000_write_word(state, 1805, 0x805);
        /* restart all parts */
        dib9000_write_word(state, 898, 0xffff);
        dib9000_write_word(state, 899, 0xffff);
        dib9000_write_word(state, 900, 0x0001);
        dib9000_write_word(state, 901, 0xff19);
        dib9000_write_word(state, 902, 0x003c);
        dib9000_write_word(state, 898, 0);
        dib9000_write_word(state, 899, 0);
        dib9000_write_word(state, 900, 0);
        dib9000_write_word(state, 901, 0);
        dib9000_write_word(state, 902, 0);
        dib9000_write_word(state, 911, state->chip.d9.cfg.if_drives);
        dib9000_set_power_mode(state, DIB9000_POWER_INTERFACE_ONLY);
        return 0;
}
/*
 * Read `len` bytes from APB register `address` through the RISC firmware
 * mailbox. `tx`/`txlen` are accepted for interface symmetry but unused.
 * Returns 0 on success, -EINVAL for a bad address / stopped firmware,
 * -EIO on mailbox failure.
 *
 * Fix: removed the unreachable `return -EIO;` after the switch (both the
 * case and the default already return).
 */
static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address, u16 attribute, const u8 * tx, u32 txlen, u8 * b, u32 len)
{
        u16 mb[10];
        u8 i, s;

        if (address >= 1024 || !state->platform.risc.fw_is_running)
                return -EINVAL;
        /* dprintk( "APB access thru rd fw %d %x", address, attribute); */

        /* request len/2 words starting at `address` */
        mb[0] = (u16) address;
        mb[1] = len / 2;
        dib9000_mbx_send_attr(state, OUT_MSG_BRIDGE_APB_R, mb, 2, attribute);

        switch (dib9000_mbx_get_message_attr(state, IN_MSG_END_BRIDGE_APB_RW, mb, &s, attribute)) {
        case 1:
                s--;    /* payload starts at mb[1]; skip the first reply word */
                for (i = 0; i < s; i++) {
                        b[i * 2] = (mb[i + 1] >> 8) & 0xff;
                        b[i * 2 + 1] = (mb[i + 1]) & 0xff;
                }
                return 0;
        default:
                return -EIO;
        }
}
/*
 * Write `len` bytes (assumed even - TODO confirm callers) to APB register
 * `address` through the RISC firmware mailbox.
 * Returns 0 on success, -EINVAL on failure.
 *
 * Fix: the original loop bound `i < 20` let the loop store into mb[10],
 * one past the end of the 10-element array (stack buffer overflow for
 * len >= 19), and the mailbox send length was computed from the unclamped
 * `len`. The payload is now clamped to the 9 words mb[] can actually hold.
 */
static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 address, u16 attribute, const u8 * b, u32 len)
{
        u16 mb[10];
        u8 s, i;

        if (address >= 1024 || !state->platform.risc.fw_is_running)
                return -EINVAL;
        /* dprintk( "APB access thru wr fw %d %x", address, attribute); */

        if (len > 18)   /* mb[] carries at most 9 payload words after the address */
                len = 18;

        mb[0] = (unsigned short)address;
        for (i = 0; i < len; i += 2)
                mb[1 + (i / 2)] = (b[i] << 8 | b[i + 1]);

        dib9000_mbx_send_attr(state, OUT_MSG_BRIDGE_APB_W, mb, 1 + len / 2, attribute);
        return dib9000_mbx_get_message_attr(state, IN_MSG_END_BRIDGE_APB_RW, mb, &s, attribute) == 1 ? 0 : -EINVAL;
}
/*
 * Trigger a firmware memory transaction of type `i` via the RW_SYNC flag
 * and poll (up to 10 reads) until the firmware clears it.
 * Returns 0 on success, -EIO on timeout.
 *
 * Fix: the original exited the loop with `index_loop--` on a u8, so on
 * timeout the counter wrapped from 0 to 255 and `index_loop > 0` reported
 * SUCCESS on the timeout path. Success is now judged by the flag itself.
 */
static int dib9000_fw_memmbx_sync(struct dib9000_state *state, u8 i)
{
        u8 index_loop = 10;

        if (!state->platform.risc.fw_is_running)
                return 0;
        dib9000_risc_mem_write(state, FE_MM_RW_SYNC, &i);
        do {
                dib9000_risc_mem_read(state, FE_MM_RW_SYNC, state->i2c_read_buffer, 1);
        } while (state->i2c_read_buffer[0] && --index_loop);

        /* the transaction is complete only when the firmware cleared the flag */
        if (state->i2c_read_buffer[0])
                return -EIO;
        return 0;
}
/*
 * Boot the RISC B firmware and push the board configuration to it: GPIO
 * component on/off functions, sub-band GPIO table, clocking/timing
 * parameters, then trigger the frontend firmware download and read back
 * the memory map (address/size pairs) it reports.
 * Returns 0 on success, -EIO / -EINVAL on failure.
 */
static int dib9000_fw_init(struct dib9000_state *state)
{
        struct dibGPIOFunction *f;
        u16 b[40] = { 0 };
        u8 i;
        u8 size;
        if (dib9000_fw_boot(state, NULL, 0, state->chip.d9.cfg.microcode_B_fe_buffer, state->chip.d9.cfg.microcode_B_fe_size) != 0)
                return -EIO;
        /* initialize the firmware */
        for (i = 0; i < ARRAY_SIZE(state->chip.d9.cfg.gpio_function); i++) {
                f = &state->chip.d9.cfg.gpio_function[i];
                if (f->mask) {
                        switch (f->function) {
                        case BOARD_GPIO_FUNCTION_COMPONENT_ON:
                                b[0] = (u16) f->mask;
                                b[1] = (u16) f->direction;
                                b[2] = (u16) f->value;
                                break;
                        case BOARD_GPIO_FUNCTION_COMPONENT_OFF:
                                b[3] = (u16) f->mask;
                                b[4] = (u16) f->direction;
                                b[5] = (u16) f->value;
                                break;
                        }
                }
        }
        if (dib9000_mbx_send(state, OUT_MSG_CONF_GPIO, b, 15) != 0)
                return -EIO;
        /* subband */
        b[0] = state->chip.d9.cfg.subband.size; /* type == 0 -> GPIO - PWM not yet supported */
        for (i = 0; i < state->chip.d9.cfg.subband.size; i++) {
                b[1 + i * 4] = state->chip.d9.cfg.subband.subband[i].f_mhz;
                b[2 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.mask;
                b[3 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.direction;
                b[4 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.value;
        }
        b[1 + i * 4] = 0;       /* fe_id */
        if (dib9000_mbx_send(state, OUT_MSG_SUBBAND_SEL, b, 2 + 4 * i) != 0)
                return -EIO;
        /* 0 - id, 1 - no_of_frontends */
        b[0] = (0 << 8) | 1;
        /* 0 = i2c-address demod, 0 = tuner */
        b[1] = (0 << 8) | (0);
        b[2] = (u16) (((state->chip.d9.cfg.xtal_clock_khz * 1000) >> 16) & 0xffff);
        b[3] = (u16) (((state->chip.d9.cfg.xtal_clock_khz * 1000)) & 0xffff);
        b[4] = (u16) ((state->chip.d9.cfg.vcxo_timer >> 16) & 0xffff);
        b[5] = (u16) ((state->chip.d9.cfg.vcxo_timer) & 0xffff);
        b[6] = (u16) ((state->chip.d9.cfg.timing_frequency >> 16) & 0xffff);
        b[7] = (u16) ((state->chip.d9.cfg.timing_frequency) & 0xffff);
        b[29] = state->chip.d9.cfg.if_drives;
        if (dib9000_mbx_send(state, OUT_MSG_INIT_DEMOD, b, ARRAY_SIZE(b)) != 0)
                return -EIO;
        if (dib9000_mbx_send(state, OUT_MSG_FE_FW_DL, NULL, 0) != 0)
                return -EIO;
        if (dib9000_mbx_get_message(state, IN_MSG_FE_FW_DL_DONE, b, &size) < 0)
                return -EIO;
        if (size > ARRAY_SIZE(b)) {
                dprintk("error : firmware returned %dbytes needed but the used buffer has only %dbytes\n Firmware init ABORTED", size,
                        (int)ARRAY_SIZE(b));
                return -EINVAL;
        }
        /* reply is a list of (addr, size) word pairs describing the memory map */
        for (i = 0; i < size; i += 2) {
                state->platform.risc.fe_mm[i / 2].addr = b[i + 0];
                state->platform.risc.fe_mm[i / 2].size = b[i + 1];
        }
        return 0;
}
/*
 * Write the channel header (frequency, bandwidth, flags) into the RISC
 * memory map so the firmware can tune / search.
 */
static void dib9000_fw_set_channel_head(struct dib9000_state *state, struct dvb_frontend_parameters *ch)
{
        u8 head[9];
        u32 freq = state->fe[0]->dtv_property_cache.frequency / 1000;
        u32 bw_khz = state->fe[0]->dtv_property_cache.bandwidth_hz / 1000;

        /* odd frontends get a 101 kHz offset - presumably diversity related; TODO confirm */
        if (state->fe_id % 2)
                freq += 101;

        /* frequency in kHz, little-endian */
        head[0] = (u8) (freq & 0xff);
        head[1] = (u8) ((freq >> 8) & 0xff);
        head[2] = (u8) ((freq >> 16) & 0xff);
        head[3] = (u8) ((freq >> 24) & 0xff);
        /* bandwidth in kHz, little-endian */
        head[4] = (u8) (bw_khz & 0xff);
        head[5] = (u8) ((bw_khz >> 8) & 0xff);
        head[6] = (u8) ((bw_khz >> 16) & 0xff);
        head[7] = (u8) ((bw_khz >> 24) & 0xff);

        head[8] = 0x80; /* do not wait for CELL ID when doing autosearch */
        if (state->fe[0]->dtv_property_cache.delivery_system == SYS_DVBT)
                head[8] |= 1;

        dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_HEAD, head);
}
static int dib9000_fw_get_channel(struct dvb_frontend *fe, struct dvb_frontend_parameters *channel)
{
struct dib9000_state *state = fe->demodulator_priv;
struct dibDVBTChannel {
s8 spectrum_inversion;
s8 nfft;
s8 guard;
s8 constellation;
s8 hrch;
s8 alpha;
s8 code_rate_hp;
s8 code_rate_lp;
s8 select_hp;
s8 intlv_native;
};
struct dibDVBTChannel *ch;
int ret = 0;
DibAcquireLock(&state->platform.risc.mem_mbx_lock);
if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
goto error;
ret = -EIO;
}
dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION,
state->i2c_read_buffer, sizeof(struct dibDVBTChannel));
ch = (struct dibDVBTChannel *)state->i2c_read_buffer;
switch (ch->spectrum_inversion & 0x7) {
case 1:
state->fe[0]->dtv_property_cache.inversion = INVERSION_ON;
break;
case 0:
state->fe[0]->dtv_property_cache.inversion = INVERSION_OFF;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.inversion = INVERSION_AUTO;
break;
}
switch (ch->nfft) {
case 0:
state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K;
break;
case 2:
state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_4K;
break;
case 1:
state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO;
break;
}
switch (ch->guard) {
case 0:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32;
break;
case 1:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_16;
break;
case 2:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8;
break;
case 3:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_4;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO;
break;
}
switch (ch->constellation) {
case 2:
state->fe[0]->dtv_property_cache.modulation = QAM_64;
break;
case 1:
state->fe[0]->dtv_property_cache.modulation = QAM_16;
break;
case 0:
state->fe[0]->dtv_property_cache.modulation = QPSK;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.modulation = QAM_AUTO;
break;
}
switch (ch->hrch) {
case 0:
state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_NONE;
break;
case 1:
state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_1;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_AUTO;
break;
}
switch (ch->code_rate_hp) {
case 1:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_1_2;
break;
case 2:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_2_3;
break;
case 3:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_3_4;
break;
case 5:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_5_6;
break;
case 7:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_7_8;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.code_rate_HP = FEC_AUTO;
break;
}
switch (ch->code_rate_lp) {
case 1:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_1_2;
break;
case 2:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_2_3;
break;
case 3:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_3_4;
break;
case 5:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_5_6;
break;
case 7:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_7_8;
break;
default:
case -1:
state->fe[0]->dtv_property_cache.code_rate_LP = FEC_AUTO;
break;
}
error:
DibReleaseLock(&state->platform.risc.mem_mbx_lock);
return ret;
}
/*
 * Translate the DVB property cache of fe[0] into the firmware's channel
 * description and write it into RISC memory. -1 in a field means "auto"
 * (the firmware determines the parameter itself). Always returns 0.
 */
static int dib9000_fw_set_channel_union(struct dvb_frontend *fe, struct dvb_frontend_parameters *channel)
{
        struct dib9000_state *state = fe->demodulator_priv;
        /* must match the layout used by dib9000_fw_get_channel */
        struct dibDVBTChannel {
                s8 spectrum_inversion;
                s8 nfft;
                s8 guard;
                s8 constellation;
                s8 hrch;
                s8 alpha;
                s8 code_rate_hp;
                s8 code_rate_lp;
                s8 select_hp;
                s8 intlv_native;
        };
        struct dibDVBTChannel ch;
        switch (state->fe[0]->dtv_property_cache.inversion) {
        case INVERSION_ON:
                ch.spectrum_inversion = 1;
                break;
        case INVERSION_OFF:
                ch.spectrum_inversion = 0;
                break;
        default:
        case INVERSION_AUTO:
                ch.spectrum_inversion = -1;
                break;
        }
        switch (state->fe[0]->dtv_property_cache.transmission_mode) {
        case TRANSMISSION_MODE_2K:
                ch.nfft = 0;
                break;
        case TRANSMISSION_MODE_4K:
                ch.nfft = 2;
                break;
        case TRANSMISSION_MODE_8K:
                ch.nfft = 1;
                break;
        default:
        case TRANSMISSION_MODE_AUTO:
                ch.nfft = 1;    /* NOTE(review): AUTO maps to 8K here, not -1; confirm intent */
                break;
        }
        switch (state->fe[0]->dtv_property_cache.guard_interval) {
        case GUARD_INTERVAL_1_32:
                ch.guard = 0;
                break;
        case GUARD_INTERVAL_1_16:
                ch.guard = 1;
                break;
        case GUARD_INTERVAL_1_8:
                ch.guard = 2;
                break;
        case GUARD_INTERVAL_1_4:
                ch.guard = 3;
                break;
        default:
        case GUARD_INTERVAL_AUTO:
                ch.guard = -1;
                break;
        }
        switch (state->fe[0]->dtv_property_cache.modulation) {
        case QAM_64:
                ch.constellation = 2;
                break;
        case QAM_16:
                ch.constellation = 1;
                break;
        case QPSK:
                ch.constellation = 0;
                break;
        default:
        case QAM_AUTO:
                ch.constellation = -1;
                break;
        }
        switch (state->fe[0]->dtv_property_cache.hierarchy) {
        case HIERARCHY_NONE:
                ch.hrch = 0;
                break;
        case HIERARCHY_1:
        case HIERARCHY_2:
        case HIERARCHY_4:
                ch.hrch = 1;    /* any hierarchical mode is signalled the same way */
                break;
        default:
        case HIERARCHY_AUTO:
                ch.hrch = -1;
                break;
        }
        ch.alpha = 1;
        switch (state->fe[0]->dtv_property_cache.code_rate_HP) {
        case FEC_1_2:
                ch.code_rate_hp = 1;
                break;
        case FEC_2_3:
                ch.code_rate_hp = 2;
                break;
        case FEC_3_4:
                ch.code_rate_hp = 3;
                break;
        case FEC_5_6:
                ch.code_rate_hp = 5;
                break;
        case FEC_7_8:
                ch.code_rate_hp = 7;
                break;
        default:
        case FEC_AUTO:
                ch.code_rate_hp = -1;
                break;
        }
        switch (state->fe[0]->dtv_property_cache.code_rate_LP) {
        case FEC_1_2:
                ch.code_rate_lp = 1;
                break;
        case FEC_2_3:
                ch.code_rate_lp = 2;
                break;
        case FEC_3_4:
                ch.code_rate_lp = 3;
                break;
        case FEC_5_6:
                ch.code_rate_lp = 5;
                break;
        case FEC_7_8:
                ch.code_rate_lp = 7;
                break;
        default:
        case FEC_AUTO:
                ch.code_rate_lp = -1;
                break;
        }
        ch.select_hp = 1;
        ch.intlv_native = 1;
        dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_UNION, (u8 *) &ch);
        return 0;
}
/*
 * State-machine step for firmware-driven tuning. On CT_DEMOD_START the
 * channel description is pushed to the firmware and a search or tune is
 * started; on CT_DEMOD_STEP_1 the firmware's progress byte is polled.
 * Returns the delay (ms) until the next call, or FE_CALLBACK_TIME_NEVER
 * when tuning has finished.
 *
 * NOTE(review): `fe_info` is not declared anywhere in this function or the
 * visible file - it presumably comes from surrounding code lost in
 * extraction; verify against the full source before building.
 */
static int dib9000_fw_tune(struct dvb_frontend *fe, struct dvb_frontend_parameters *ch)
{
        struct dib9000_state *state = fe->demodulator_priv;
        int ret = 10, search = state->channel_status.status == CHANNEL_STATUS_PARAMETERS_UNKNOWN;
        s8 i;
        switch (state->tune_state) {
        case CT_DEMOD_START:
                dib9000_fw_set_channel_head(state, ch);
                /* write the channel context - a channel is initialized to 0, so it is OK */
                dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_CONTEXT, (u8 *) fe_info);
                dib9000_risc_mem_write(state, FE_MM_W_FE_INFO, (u8 *) fe_info);
                if (search)
                        dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_SEARCH, NULL, 0);
                else {
                        dib9000_fw_set_channel_union(fe, ch);
                        dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_TUNE, NULL, 0);
                }
                state->tune_state = CT_DEMOD_STEP_1;
                break;
        case CT_DEMOD_STEP_1:
                if (search)
                        dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_SEARCH_STATE, state->i2c_read_buffer, 1);
                else
                        dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_TUNE_STATE, state->i2c_read_buffer, 1);
                i = (s8)state->i2c_read_buffer[0];
                switch (i) {    /* something happened */
                case 0:         /* still in progress */
                        break;
                case -2:        /* tps locks are "slower" than MPEG locks -> even in autosearch data is OK here */
                        if (search)
                                state->status = FE_STATUS_DEMOD_SUCCESS;
                        else {
                                state->tune_state = CT_DEMOD_STOP;
                                state->status = FE_STATUS_LOCKED;
                        }
                        break;
                default:
                        state->status = FE_STATUS_TUNE_FAILED;
                        state->tune_state = CT_DEMOD_STOP;
                        break;
                }
                break;
        default:
                ret = FE_CALLBACK_TIME_NEVER;
                break;
        }
        return ret;
}
/*
 * Enable/disable the diversity input by forwarding the request to the
 * firmware. Returns the mailbox send result (0 on success).
 */
static int dib9000_fw_set_diversity_in(struct dvb_frontend *fe, int onoff)
{
        struct dib9000_state *state = fe->demodulator_priv;
        u16 mode = (u16) onoff;
        return dib9000_mbx_send(state, OUT_MSG_ENABLE_DIVERSITY, &mode, 1);
}
/*
 * Configure the transport-stream output interface (register 1795 and the
 * SMO register 295) and tell the firmware about the selected mode.
 * Returns the mailbox send result, or -EINVAL for an unknown mode.
 */
static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
{
        struct dib9000_state *state = fe->demodulator_priv;
        u16 outreg, smo_mode;
        dprintk("setting output mode for demod %p to %d", fe, mode);
        switch (mode) {
        case OUTMODE_MPEG2_PAR_GATED_CLK:
                outreg = (1 << 10);     /* 0x0400 */
                break;
        case OUTMODE_MPEG2_PAR_CONT_CLK:
                outreg = (1 << 10) | (1 << 6);  /* 0x0440 */
                break;
        case OUTMODE_MPEG2_SERIAL:
                outreg = (1 << 10) | (2 << 6) | (0 << 1);       /* 0x0482 */
                break;
        case OUTMODE_DIVERSITY:
                outreg = (1 << 10) | (4 << 6);  /* 0x0500 */
                break;
        case OUTMODE_MPEG2_FIFO:
                outreg = (1 << 10) | (5 << 6);
                break;
        case OUTMODE_HIGH_Z:
                outreg = 0;
                break;
        default:
                dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe[0]);
                return -EINVAL;
        }
        dib9000_write_word(state, 1795, outreg);
        /* for MPEG2 modes, also configure the stream-mode-output register */
        switch (mode) {
        case OUTMODE_MPEG2_PAR_GATED_CLK:
        case OUTMODE_MPEG2_PAR_CONT_CLK:
        case OUTMODE_MPEG2_SERIAL:
        case OUTMODE_MPEG2_FIFO:
                smo_mode = (dib9000_read_word(state, 295) & 0x0010) | (1 << 1);
                if (state->chip.d9.cfg.output_mpeg2_in_188_bytes)
                        smo_mode |= (1 << 5);
                dib9000_write_word(state, 295, smo_mode);
                break;
        }
        outreg = to_fw_output_mode(mode);
        return dib9000_mbx_send(state, OUT_MSG_SET_OUTPUT_MODE, &outreg, 1);
}
/*
 * I2C algorithm for the tuner interface tunnelled through the demod
 * (registers 784-791). Transfers are limited to 16 bytes and done in
 * 16-bit units. Always reports all messages as transferred.
 *
 * NOTE(review): the wait loops below are pure busy-waits with no delay
 * between register reads; the 1000-iteration bound is therefore an I2C
 * round-trip count, not a time.
 */
static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
        struct dib9000_state *state = i2c_get_adapdata(i2c_adap);
        u16 i, len, t, index_msg;
        for (index_msg = 0; index_msg < num; index_msg++) {
                if (msg[index_msg].flags & I2C_M_RD) {  /* read */
                        len = msg[index_msg].len;
                        if (len > 16)
                                len = 16;
                        if (dib9000_read_word(state, 790) != 0)
                                dprintk("TunerITF: read busy");
                        dib9000_write_word(state, 784, (u16) (msg[index_msg].addr));
                        dib9000_write_word(state, 787, (len / 2) - 1);  /* word count - 1 */
                        dib9000_write_word(state, 786, 1);      /* start read */
                        i = 1000;
                        while (dib9000_read_word(state, 790) != (len / 2) && i)
                                i--;
                        if (i == 0)
                                dprintk("TunerITF: read failed");
                        /* drain the 16-bit read FIFO into the caller's buffer */
                        for (i = 0; i < len; i += 2) {
                                t = dib9000_read_word(state, 785);
                                msg[index_msg].buf[i] = (t >> 8) & 0xff;
                                msg[index_msg].buf[i + 1] = (t) & 0xff;
                        }
                        if (dib9000_read_word(state, 790) != 0)
                                dprintk("TunerITF: read more data than expected");
                } else {        /* write */
                        i = 1000;
                        while (dib9000_read_word(state, 789) && i)
                                i--;
                        if (i == 0)
                                dprintk("TunerITF: write busy");
                        len = msg[index_msg].len;
                        if (len > 16)
                                len = 16;
                        for (i = 0; i < len; i += 2)
                                dib9000_write_word(state, 785, (msg[index_msg].buf[i] << 8) | msg[index_msg].buf[i + 1]);
                        dib9000_write_word(state, 784, (u16) msg[index_msg].addr);
                        dib9000_write_word(state, 787, (len / 2) - 1);
                        dib9000_write_word(state, 786, 0);      /* start write */
                        i = 1000;
                        while (dib9000_read_word(state, 791) > 0 && i)
                                i--;
                        if (i == 0)
                                dprintk("TunerITF: write failed");
                }
        }
        return num;
}
/*
 * Store the SCL frequency used for subsequent component-bus (firmware
 * tunnelled I2C) transfers. Always returns 0.
 */
int dib9000_fw_set_component_bus_speed(struct dvb_frontend *fe, u16 speed)
{
        struct dib9000_state *state = fe->demodulator_priv;
        state->component_bus_speed = speed;
        return 0;
}
EXPORT_SYMBOL(dib9000_fw_set_component_bus_speed);
/*
 * I2C algorithm for the component bus tunnelled through the RISC firmware:
 * a 13-byte access descriptor is written, the write payload of msg[0] is
 * streamed into the shared buffer, the firmware executes the transaction,
 * and an optional read reply (msg[1] with I2C_M_RD) is read back.
 * Returns `num` on success, 0 when the firmware sync fails.
 */
static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num)
{
        struct dib9000_state *state = i2c_get_adapdata(i2c_adap);
        u8 type = 0;    /* I2C */
        u8 port = DIBX000_I2C_INTERFACE_GPIO_3_4;
        u16 scl = state->component_bus_speed;   /* SCL frequency */
        struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[FE_MM_RW_COMPONENT_ACCESS_BUFFER];
        u8 p[13] = { 0 };
        p[0] = type;
        p[1] = port;
        p[2] = msg[0].addr << 1;        /* 8-bit style address */
        p[3] = (u8) scl & 0xff; /* scl */
        p[4] = (u8) (scl >> 8);
        p[7] = 0;
        p[8] = 0;
        /* write length (little-endian) */
        p[9] = (u8) (msg[0].len);
        p[10] = (u8) (msg[0].len >> 8);
        /* read length, when a read message follows */
        if ((num > 1) && (msg[1].flags & I2C_M_RD)) {
                p[11] = (u8) (msg[1].len);
                p[12] = (u8) (msg[1].len >> 8);
        } else {
                p[11] = 0;
                p[12] = 0;
        }
        DibAcquireLock(&state->platform.risc.mem_mbx_lock);
        dib9000_risc_mem_write(state, FE_MM_W_COMPONENT_ACCESS, p);
        {       /* write-part */
                dib9000_risc_mem_setup_cmd(state, m->addr, msg[0].len, 0);
                dib9000_risc_mem_write_chunks(state, msg[0].buf, msg[0].len);
        }
        /* do the transaction */
        if (dib9000_fw_memmbx_sync(state, FE_SYNC_COMPONENT_ACCESS) < 0) {
                DibReleaseLock(&state->platform.risc.mem_mbx_lock);
                return 0;
        }
        /* read back any possible result */
        if ((num > 1) && (msg[1].flags & I2C_M_RD))
                dib9000_risc_mem_read(state, FE_MM_RW_COMPONENT_ACCESS_BUFFER, msg[1].buf, msg[1].len);
        DibReleaseLock(&state->platform.risc.mem_mbx_lock);
        return num;
}
/* Both internal adapters support plain I2C only (no SMBus emulation). */
static u32 dib9000_i2c_func(struct i2c_adapter *adapter)
{
        return I2C_FUNC_I2C;
}
static struct i2c_algorithm dib9000_tuner_algo = {
.master_xfer = dib9000_tuner_xfer,
.functionality = dib9000_i2c_func,
};
static struct i2c_algorithm dib9000_component_bus_algo = {
.master_xfer = dib9000_fw_component_bus_xfer,
.functionality = dib9000_i2c_func,
};
/* Expose the firmware-backed tuner I2C adapter registered in dib9000_attach(). */
struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe)
{
	struct dib9000_state *state = fe->demodulator_priv;

	return &state->tuner_adap;
}
EXPORT_SYMBOL(dib9000_get_tuner_interface);
/* Expose the firmware-backed component-bus I2C adapter. */
struct i2c_adapter *dib9000_get_component_bus_interface(struct dvb_frontend *fe)
{
	struct dib9000_state *state = fe->demodulator_priv;

	return &state->component_bus;
}
EXPORT_SYMBOL(dib9000_get_component_bus_interface);
/* Return the requested dibx000 I2C master interface. */
struct i2c_adapter *dib9000_get_i2c_master(struct dvb_frontend *fe, enum dibx000_i2c_interface intf, int gating)
{
	struct dib9000_state *state = fe->demodulator_priv;

	return dibx000_get_i2c_adapter(&state->i2c_master, intf, gating);
}
EXPORT_SYMBOL(dib9000_get_i2c_master);
/* Replace the I2C adapter used to reach the demod registers. */
int dib9000_set_i2c_adapter(struct dvb_frontend *fe, struct i2c_adapter *i2c)
{
	struct dib9000_state *state = fe->demodulator_priv;

	state->i2c.i2c_adap = i2c;

	return 0;
}
EXPORT_SYMBOL(dib9000_set_i2c_adapter);
/*
 * Configure one GPIO pin: direction bit in register 773, value bit in
 * register 774, both via cached read-modify-write.
 */
static int dib9000_cfg_gpio(struct dib9000_state *st, u8 num, u8 dir, u8 val)
{
	st->gpio_dir = dib9000_read_word(st, 773);
	st->gpio_dir &= ~(1 << num);	/* reset the direction bit */
	st->gpio_dir |= (dir & 0x1) << num;	/* set the new direction */
	dib9000_write_word(st, 773, st->gpio_dir);

	st->gpio_val = dib9000_read_word(st, 774);
	st->gpio_val &= ~(1 << num);	/* reset the value bit */
	st->gpio_val |= (val & 0x01) << num;	/* set the new value */
	dib9000_write_word(st, 774, st->gpio_val);

	dprintk("gpio dir: %04x: gpio val: %04x", st->gpio_dir, st->gpio_val);

	return 0;
}
/* Public wrapper around dib9000_cfg_gpio(). */
int dib9000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val)
{
	struct dib9000_state *st = fe->demodulator_priv;

	return dib9000_cfg_gpio(st, num, dir, val);
}
EXPORT_SYMBOL(dib9000_set_gpio);
/*
 * Globally enable/disable the firmware PID filter (bit 4 of register 295).
 *
 * While a tune is in progress (pid_ctrl_index != -2) the command cannot be
 * sent to the firmware; it is queued in state->pid_ctrl[] and replayed at
 * the end of dib9000_set_frontend().
 */
int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u16 val;
	int ret;

	if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) {
		/* postpone the pid filtering cmd */
		dprintk("pid filter cmd postpone");
		state->pid_ctrl_index++;
		state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL;
		state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
		return 0;
	}

	DibAcquireLock(&state->demod_lock);

	/* read-modify-write: clear bit 4, then set it from 'onoff' */
	val = dib9000_read_word(state, 294 + 1) & 0xffef;
	val |= (onoff & 0x1) << 4;

	dprintk("PID filter enabled %d", onoff);
	ret = dib9000_write_word(state, 294 + 1, val);
	DibReleaseLock(&state->demod_lock);
	return ret;
}
EXPORT_SYMBOL(dib9000_fw_pid_filter_ctrl);
/*
 * Program one hardware PID filter slot (register 301 + id): bit 13 enables
 * the slot, the low bits carry the PID. Commands issued while a tune is in
 * progress (pid_ctrl_index != -2) are queued (at most 10 entries) and
 * replayed at the end of dib9000_set_frontend().
 */
int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
	struct dib9000_state *state = fe->demodulator_priv;
	int ret;

	if (state->pid_ctrl_index != -2) {
		/* postpone the pid filtering cmd */
		dprintk("pid filter postpone");
		if (state->pid_ctrl_index < 9) {
			state->pid_ctrl_index++;
			state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER;
			state->pid_ctrl[state->pid_ctrl_index].id = id;
			state->pid_ctrl[state->pid_ctrl_index].pid = pid;
			state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
		} else
			dprintk("can not add any more pid ctrl cmd");
		return 0;
	}

	DibAcquireLock(&state->demod_lock);
	dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
	ret = dib9000_write_word(state, 300 + 1 + id,
			onoff ? (1 << 13) | pid : 0);
	DibReleaseLock(&state->demod_lock);
	return ret;
}
EXPORT_SYMBOL(dib9000_fw_pid_filter);
/* To be called once the PLL is up: kicks off dib9000_fw_init(). */
int dib9000_firmware_post_pll_init(struct dvb_frontend *fe)
{
	struct dib9000_state *st = fe->demodulator_priv;

	return dib9000_fw_init(st);
}
EXPORT_SYMBOL(dib9000_firmware_post_pll_init);
/*
 * .release callback: detach the slave frontends, free the firmware
 * mailbox/memory locks, unregister the I2C masters and adapters, then
 * free the master frontend and the state itself.
 */
static void dib9000_release(struct dvb_frontend *demod)
{
	struct dib9000_state *st = demod->demodulator_priv;
	u8 index_frontend;

	/* slot 0 is the master; detach every attached slave */
	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (st->fe[index_frontend] != NULL); index_frontend++)
		dvb_frontend_detach(st->fe[index_frontend]);

	/* Bug fix: the local variable is 'st'; 'state' is undefined here */
	DibFreeLock(&st->platform.risc.mbx_if_lock);
	DibFreeLock(&st->platform.risc.mbx_lock);
	DibFreeLock(&st->platform.risc.mem_lock);
	DibFreeLock(&st->platform.risc.mem_mbx_lock);
	DibFreeLock(&st->demod_lock);
	dibx000_exit_i2c_master(&st->i2c_master);

	i2c_del_adapter(&st->tuner_adap);
	i2c_del_adapter(&st->component_bus);
	kfree(st->fe[0]);
	kfree(st);
}
/* .init callback: intentionally a no-op */
static int dib9000_wakeup(struct dvb_frontend *fe)
{
	return 0;
}
/*
 * Put all frontends to sleep: slaves first (via their own .sleep), then
 * the master through the OUT_MSG_FE_SLEEP mailbox message. Stops at the
 * first slave error and returns it.
 */
static int dib9000_sleep(struct dvb_frontend *fe)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u8 index_frontend;
	int ret = 0;

	DibAcquireLock(&state->demod_lock);
	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
		ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]);
		if (ret < 0)
			goto error;
	}
	ret = dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0);

error:
	DibReleaseLock(&state->demod_lock);
	return ret;
}
/* Ask dvb-core to wait at least 1 s between tuning attempts */
static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune)
{
	tune->min_delay_ms = 1000;
	return 0;
}
/*
 * Report the currently tuned channel. If any slave has TPS sync, its
 * parameter cache is copied to every other frontend; otherwise the channel
 * description is fetched from the master chip firmware and propagated to
 * the slaves.
 *
 * demod_lock is skipped when called internally from dib9000_set_frontend()
 * (state->get_frontend_internal != 0), which already holds it.
 */
static int dib9000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u8 index_frontend, sub_index_frontend;
	fe_status_t stat;
	int ret = 0;

	if (state->get_frontend_internal == 0)
		DibAcquireLock(&state->demod_lock);

	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
		state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
		if (stat & FE_HAS_SYNC) {
			dprintk("TPS lock on the slave%i", index_frontend);

			/* synchronize the cache with the other frontends */
			state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], fep);
			for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL);
			     sub_index_frontend++) {
				if (sub_index_frontend != index_frontend) {
					state->fe[sub_index_frontend]->dtv_property_cache.modulation =
					    state->fe[index_frontend]->dtv_property_cache.modulation;
					state->fe[sub_index_frontend]->dtv_property_cache.inversion =
					    state->fe[index_frontend]->dtv_property_cache.inversion;
					state->fe[sub_index_frontend]->dtv_property_cache.transmission_mode =
					    state->fe[index_frontend]->dtv_property_cache.transmission_mode;
					state->fe[sub_index_frontend]->dtv_property_cache.guard_interval =
					    state->fe[index_frontend]->dtv_property_cache.guard_interval;
					state->fe[sub_index_frontend]->dtv_property_cache.hierarchy =
					    state->fe[index_frontend]->dtv_property_cache.hierarchy;
					state->fe[sub_index_frontend]->dtv_property_cache.code_rate_HP =
					    state->fe[index_frontend]->dtv_property_cache.code_rate_HP;
					state->fe[sub_index_frontend]->dtv_property_cache.code_rate_LP =
					    state->fe[index_frontend]->dtv_property_cache.code_rate_LP;
					state->fe[sub_index_frontend]->dtv_property_cache.rolloff =
					    state->fe[index_frontend]->dtv_property_cache.rolloff;
				}
			}
			ret = 0;
			goto return_value;
		}
	}

	/* get the channel from master chip */
	ret = dib9000_fw_get_channel(fe, fep);
	if (ret != 0)
		goto return_value;

	/* synchronize the cache with the other frontends */
	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
		state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion;
		state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode;
		state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval;
		state->fe[index_frontend]->dtv_property_cache.modulation = fe->dtv_property_cache.modulation;
		state->fe[index_frontend]->dtv_property_cache.hierarchy = fe->dtv_property_cache.hierarchy;
		state->fe[index_frontend]->dtv_property_cache.code_rate_HP = fe->dtv_property_cache.code_rate_HP;
		state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP;
		state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff;
	}
	ret = 0;

return_value:
	if (state->get_frontend_internal == 0)
		DibReleaseLock(&state->demod_lock);
	return ret;
}
/* Record the tune state-machine state; entering CT_DEMOD_START marks the
 * tune result as pending until the firmware reports otherwise. */
static int dib9000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state)
{
	struct dib9000_state *state = fe->demodulator_priv;

	state->tune_state = tune_state;
	if (tune_state == CT_DEMOD_START)
		state->status = FE_STATUS_TUNE_PENDING;

	return 0;
}

/* Return the last tune status recorded for this frontend */
static u32 dib9000_get_status(struct dvb_frontend *fe)
{
	struct dib9000_state *state = fe->demodulator_priv;
	return state->status;
}

/* Cache the channel-status block used when (re)starting a tune */
static int dib9000_set_channel_status(struct dvb_frontend *fe, struct dvb_frontend_parametersContext *channel_status)
{
	struct dib9000_state *state = fe->demodulator_priv;

	memcpy(&state->channel_status, channel_status, sizeof(struct dvb_frontend_parametersContext));
	return 0;
}
/*
 * Tune the master and all slave frontends.
 *
 * Flow: validate frequency/bandwidth, broadcast the channel parameters to
 * every frontend, run the firmware tune state machines until one frontend
 * succeeds or all fail, resync the channel cache from the winner, retune
 * the remaining frontends with the found parameters, then set the final
 * output/diversity modes and replay any postponed PID-filter commands.
 *
 * Always returns 0; a failed tune is only visible via read_status.
 */
static int dib9000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep)
{
	struct dib9000_state *state = fe->demodulator_priv;
	int sleep_time, sleep_time_slave;
	u32 frontend_status;
	u8 nbr_pending, exit_condition, index_frontend, index_frontend_success;
	struct dvb_frontend_parametersContext channel_status;

	/* check that the correct parameters are set */
	if (state->fe[0]->dtv_property_cache.frequency == 0) {
		dprintk("dib9000: must specify frequency ");
		return 0;
	}

	if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) {
		dprintk("dib9000: must specify bandwidth ");
		return 0;
	}

	state->pid_ctrl_index = -1;	/* postpone the pid filtering cmd */
	DibAcquireLock(&state->demod_lock);

	fe->dtv_property_cache.delivery_system = SYS_DVBT;

	/* set the master status */
	if (fep->u.ofdm.transmission_mode == TRANSMISSION_MODE_AUTO ||
	    fep->u.ofdm.guard_interval == GUARD_INTERVAL_AUTO || fep->u.ofdm.constellation == QAM_AUTO || fep->u.ofdm.code_rate_HP == FEC_AUTO) {
		/* no channel specified, autosearch the channel */
		state->channel_status.status = CHANNEL_STATUS_PARAMETERS_UNKNOWN;
	} else
		state->channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;

	/* set mode and status for the different frontends */
	for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
		dib9000_fw_set_diversity_in(state->fe[index_frontend], 1);

		/* synchronization of the cache */
		memcpy(&state->fe[index_frontend]->dtv_property_cache, &fe->dtv_property_cache, sizeof(struct dtv_frontend_properties));
		state->fe[index_frontend]->dtv_property_cache.delivery_system = SYS_DVBT;

		/* high-Z until the tune result is known */
		dib9000_fw_set_output_mode(state->fe[index_frontend], OUTMODE_HIGH_Z);
		dib9000_set_channel_status(state->fe[index_frontend], &state->channel_status);
		dib9000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START);
	}

	/* actual tune */
	exit_condition = 0;	/* 0: tune pending; 1: tune failed; 2:tune success */
	index_frontend_success = 0;
	do {
		sleep_time = dib9000_fw_tune(state->fe[0], NULL);
		for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
			sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend], NULL);
			/* keep the longest requested wait among all frontends */
			if (sleep_time == FE_CALLBACK_TIME_NEVER)
				sleep_time = sleep_time_slave;
			else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time))
				sleep_time = sleep_time_slave;
		}
		if (sleep_time != FE_CALLBACK_TIME_NEVER)
			msleep(sleep_time / 10);
		else
			break;

		nbr_pending = 0;
		exit_condition = 0;
		index_frontend_success = 0;
		for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
			frontend_status = -dib9000_get_status(state->fe[index_frontend]);
			if (frontend_status > -FE_STATUS_TUNE_PENDING) {
				exit_condition = 2;	/* tune success */
				index_frontend_success = index_frontend;
				break;
			}
			if (frontend_status == -FE_STATUS_TUNE_PENDING)
				nbr_pending++;	/* some frontends are still tuning */
		}
		if ((exit_condition != 2) && (nbr_pending == 0))
			exit_condition = 1;	/* if all tune are done and no success, exit: tune failed */
	} while (exit_condition == 0);

	/* check the tune result */
	if (exit_condition == 1) {	/* tune failed */
		dprintk("tune failed");
		DibReleaseLock(&state->demod_lock);
		/* tune failed; put all the pid filtering cmd to junk */
		state->pid_ctrl_index = -1;
		return 0;
	}

	dprintk("tune success on frontend%i", index_frontend_success);

	/* synchronize all the channel cache */
	state->get_frontend_internal = 1;
	dib9000_get_frontend(state->fe[0], fep);
	state->get_frontend_internal = 0;

	/* retune the other frontends with the found channel */
	channel_status.status = CHANNEL_STATUS_PARAMETERS_SET;
	for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
		/* only retune the frontends which was not tuned success */
		if (index_frontend != index_frontend_success) {
			dib9000_set_channel_status(state->fe[index_frontend], &channel_status);
			dib9000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START);
		}
	}
	do {
		sleep_time = FE_CALLBACK_TIME_NEVER;
		for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
			if (index_frontend != index_frontend_success) {
				sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend], NULL);
				if (sleep_time == FE_CALLBACK_TIME_NEVER)
					sleep_time = sleep_time_slave;
				else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time))
					sleep_time = sleep_time_slave;
			}
		}
		if (sleep_time != FE_CALLBACK_TIME_NEVER)
			msleep(sleep_time / 10);
		else
			break;

		nbr_pending = 0;
		for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
			if (index_frontend != index_frontend_success) {
				frontend_status = -dib9000_get_status(state->fe[index_frontend]);
				if ((index_frontend != index_frontend_success) && (frontend_status == -FE_STATUS_TUNE_PENDING))
					nbr_pending++;	/* some frontends are still tuning */
			}
		}
	} while (nbr_pending != 0);

	/* set the output mode */
	dib9000_fw_set_output_mode(state->fe[0], state->chip.d9.cfg.output_mode);
	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
		dib9000_fw_set_output_mode(state->fe[index_frontend], OUTMODE_DIVERSITY);

	/* turn off the diversity for the last frontend */
	dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0);

	DibReleaseLock(&state->demod_lock);

	/* replay the PID-filter commands queued while the tune was running */
	if (state->pid_ctrl_index >= 0) {
		u8 index_pid_filter_cmd;
		u8 pid_ctrl_index = state->pid_ctrl_index;

		state->pid_ctrl_index = -2;
		for (index_pid_filter_cmd = 0;
				index_pid_filter_cmd <= pid_ctrl_index;
				index_pid_filter_cmd++) {
			if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER_CTRL)
				dib9000_fw_pid_filter_ctrl(state->fe[0],
						state->pid_ctrl[index_pid_filter_cmd].onoff);
			else if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER)
				dib9000_fw_pid_filter(state->fe[0],
						state->pid_ctrl[index_pid_filter_cmd].id,
						state->pid_ctrl[index_pid_filter_cmd].pid,
						state->pid_ctrl[index_pid_filter_cmd].onoff);
		}
	}
	/* do not postpone any more the pid filtering */
	state->pid_ctrl_index = -2;

	return 0;
}
/* Raw lock-status word of this frontend (register 535) */
static u16 dib9000_read_lock(struct dvb_frontend *fe)
{
	struct dib9000_state *state = fe->demodulator_priv;

	return dib9000_read_word(state, 535);
}
/*
 * Aggregate the lock words (register 535) of the master and all slaves
 * into DVB fe_status_t flags. As mapped below: 0x8000 -> signal, 0x3000 ->
 * carrier, 0x0100 -> Viterbi, 0x0038 (all three bits) -> sync, 0x0008 ->
 * full lock.
 */
static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u8 index_frontend;
	u16 lock = 0, lock_slave = 0;

	DibAcquireLock(&state->demod_lock);
	/* OR together the lock words of all slave frontends */
	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
		lock_slave |= dib9000_read_lock(state->fe[index_frontend]);

	lock = dib9000_read_word(state, 535);

	*stat = 0;

	if ((lock & 0x8000) || (lock_slave & 0x8000))
		*stat |= FE_HAS_SIGNAL;
	if ((lock & 0x3000) || (lock_slave & 0x3000))
		*stat |= FE_HAS_CARRIER;
	if ((lock & 0x0100) || (lock_slave & 0x0100))
		*stat |= FE_HAS_VITERBI;
	if (((lock & 0x0038) == 0x38) || ((lock_slave & 0x0038) == 0x38))
		*stat |= FE_HAS_SYNC;
	if ((lock & 0x0008) || (lock_slave & 0x0008))
		*stat |= FE_HAS_LOCK;
	DibReleaseLock(&state->demod_lock);

	return 0;
}
/*
 * Read the bit error rate from the firmware channel monitor (32-bit value
 * assembled from monitor words 10 and 11).
 */
static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u16 *c;
	int ret = 0;

	DibAcquireLock(&state->demod_lock);
	DibAcquireLock(&state->platform.risc.mem_mbx_lock);
	if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
		/* Fix: release the mailbox lock on the error path; it was
		 * previously left held (cf. dib9000_fw_component_bus_xfer,
		 * which releases it on the same failure) */
		DibReleaseLock(&state->platform.risc.mem_mbx_lock);
		ret = -EIO;
		goto error;
	}
	dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR,
			state->i2c_read_buffer, 16 * 2);
	DibReleaseLock(&state->platform.risc.mem_mbx_lock);
	c = (u16 *)state->i2c_read_buffer;

	*ber = c[10] << 16 | c[11];

error:
	DibReleaseLock(&state->demod_lock);
	return ret;
}
/*
 * Sum the signal strength of all frontends, saturating at 65535. The
 * master's contribution is derived from firmware monitor word 4 on an
 * inverted scale (65535 - c[4]).
 */
static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u8 index_frontend;
	u16 *c = (u16 *)state->i2c_read_buffer;
	u16 val;
	int ret = 0;

	DibAcquireLock(&state->demod_lock);
	*strength = 0;
	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
		state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val);
		/* saturating add */
		if (val > 65535 - *strength)
			*strength = 65535;
		else
			*strength += val;
	}

	DibAcquireLock(&state->platform.risc.mem_mbx_lock);
	if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
		/* Fix: don't leave the mailbox lock held on the error path */
		DibReleaseLock(&state->platform.risc.mem_mbx_lock);
		ret = -EIO;
		goto error;
	}
	dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
	DibReleaseLock(&state->platform.risc.mem_mbx_lock);

	val = 65535 - c[4];
	if (val > 65535 - *strength)
		*strength = 65535;
	else
		*strength += val;

error:
	DibReleaseLock(&state->demod_lock);
	return ret;
}
/*
 * Compute s/n as a 16.16 fixed-point ratio from the mantissa/exponent
 * fields packed in firmware monitor words 7 and 8. Returns 0xffffffff
 * when the 'n' term is zero.
 */
static u32 dib9000_get_snr(struct dvb_frontend *fe)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u16 *c = (u16 *)state->i2c_read_buffer;
	u32 n, s, exp;
	u16 val;

	DibAcquireLock(&state->platform.risc.mem_mbx_lock);
	if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
		/* Fix: release the mailbox lock before bailing out; it was
		 * previously left held on this path */
		DibReleaseLock(&state->platform.risc.mem_mbx_lock);
		return -EIO;
	}
	dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
	DibReleaseLock(&state->platform.risc.mem_mbx_lock);

	/* unpack the 'n' mantissa (8 bits) and its 6-bit exponent */
	val = c[7];
	n = (val >> 4) & 0xff;
	exp = ((val & 0xf) << 2);
	val = c[8];
	exp += ((val >> 14) & 0x3);
	if ((exp & 0x20) != 0)
		exp -= 0x40;	/* sign-extend the 6-bit exponent */
	n <<= exp + 16;

	/* unpack the 's' mantissa and exponent from the same word */
	s = (val >> 6) & 0xFF;
	exp = (val & 0x3F);
	if ((exp & 0x20) != 0)
		exp -= 0x40;	/* sign-extend */
	s <<= exp + 16;

	/* 16.16 fixed-point division: integer quotient plus fraction */
	if (n > 0) {
		u32 t = (s / n) << 16;
		return t + ((s << 16) - n * t) / n;
	}
	return 0xffffffff;
}
/*
 * Sum the 16.16 fixed-point SNR ratios of master and slaves and convert
 * the integer part to a dB-scaled value via intlog10().
 */
static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u8 index_frontend;
	u32 snr_master;

	DibAcquireLock(&state->demod_lock);
	snr_master = dib9000_get_snr(fe);
	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
		snr_master += dib9000_get_snr(state->fe[index_frontend]);

	/* only the integer part (upper 16 bits) is converted */
	if ((snr_master >> 16) != 0) {
		snr_master = 10 * intlog10(snr_master >> 16);
		*snr = snr_master / ((1 << 24) / 10);
	} else
		*snr = 0;
	DibReleaseLock(&state->demod_lock);

	return 0;
}
/*
 * Read the uncorrected-block counter from firmware monitor word 12.
 */
static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u16 *c = (u16 *)state->i2c_read_buffer;
	int ret = 0;

	DibAcquireLock(&state->demod_lock);
	DibAcquireLock(&state->platform.risc.mem_mbx_lock);
	if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
		/* Fix: don't leave the mailbox lock held on failure */
		DibReleaseLock(&state->platform.risc.mem_mbx_lock);
		ret = -EIO;
		goto error;
	}
	dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2);
	DibReleaseLock(&state->platform.risc.mem_mbx_lock);

	*unc = c[12];

error:
	DibReleaseLock(&state->demod_lock);
	return ret;
}
/*
 * Assign individual I2C addresses to @no_of_demods DiB9000 chips that all
 * power up at @default_addr. Chips are processed highest index first and
 * reprogrammed to first_addr + 2*k; a second pass writes the final value
 * of registers 1794/1795.
 *
 * Register numbers 1817/1796/1227/1794/1795 presumably control reset and
 * address latching — TODO confirm against the DiB9000 datasheet.
 *
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr)
{
	int k = 0, ret = 0;
	u8 new_addr = 0;
	struct i2c_device client = {.i2c_adap = i2c };

	client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
	if (!client.i2c_write_buffer) {
		dprintk("%s: not enough memory", __func__);
		return -ENOMEM;
	}
	client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
	if (!client.i2c_read_buffer) {
		dprintk("%s: not enough memory", __func__);
		ret = -ENOMEM;
		goto error_memory;
	}

	client.i2c_addr = default_addr + 16;
	dib9000_i2c_write16(&client, 1796, 0x0);

	for (k = no_of_demods - 1; k >= 0; k--) {
		/* designated i2c address */
		new_addr = first_addr + (k << 1);

		/* same register sequence at the default and the new address */
		client.i2c_addr = default_addr;
		dib9000_i2c_write16(&client, 1817, 3);
		dib9000_i2c_write16(&client, 1796, 0);
		dib9000_i2c_write16(&client, 1227, 1);
		dib9000_i2c_write16(&client, 1227, 0);

		client.i2c_addr = new_addr;
		dib9000_i2c_write16(&client, 1817, 3);
		dib9000_i2c_write16(&client, 1796, 0);
		dib9000_i2c_write16(&client, 1227, 1);
		dib9000_i2c_write16(&client, 1227, 0);

		/* the chip must identify at either the new or default address */
		if (dib9000_identify(&client) == 0) {
			client.i2c_addr = default_addr;
			if (dib9000_identify(&client) == 0) {
				dprintk("DiB9000 #%d: not identified", k);
				ret = -EIO;
				goto error;
			}
		}

		dib9000_i2c_write16(&client, 1795, (1 << 10) | (4 << 6));
		dib9000_i2c_write16(&client, 1794, (new_addr << 2) | 2);

		dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
	}

	/* second pass: write the final 1794/1795 values at each new address */
	for (k = 0; k < no_of_demods; k++) {
		new_addr = first_addr | (k << 1);
		client.i2c_addr = new_addr;

		dib9000_i2c_write16(&client, 1794, (new_addr << 2));
		dib9000_i2c_write16(&client, 1795, 0);
	}

error:
	kfree(client.i2c_read_buffer);
error_memory:
	kfree(client.i2c_write_buffer);

	return ret;
}
EXPORT_SYMBOL(dib9000_i2c_enumeration);
/*
 * Append @fe_slave to the first free slot of the frontend list (slot 0 is
 * the master). Returns 0 on success, -ENOMEM when the list is full.
 */
int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u8 slot;

	for (slot = 1; slot < MAX_NUMBER_OF_FRONTENDS; slot++) {
		if (state->fe[slot] == NULL) {
			dprintk("set slave fe %p to index %i", fe_slave, slot);
			state->fe[slot] = fe_slave;
			return 0;
		}
	}

	dprintk("too many slave frontend");
	return -ENOMEM;
}
EXPORT_SYMBOL(dib9000_set_slave_frontend);
/*
 * Detach the most recently added slave frontend from the list.
 *
 * Returns 0 on success, -ENODEV if no slave is attached.
 */
int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u8 index_frontend = 1;

	/* find the first free slot (slot 0 is the master) */
	while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
		index_frontend++;

	if (index_frontend != 1) {
		dprintk("remove slave fe %p (index %i)", state->fe[index_frontend - 1], index_frontend - 1);
		/*
		 * Bug fix: clear the slot of the last attached slave
		 * (index_frontend - 1). The old code cleared
		 * state->fe[index_frontend], which is either already NULL
		 * or, when every slot is in use, one element past the end
		 * of the array.
		 */
		state->fe[index_frontend - 1] = NULL;
		return 0;
	}

	dprintk("no frontend to be removed");
	return -ENODEV;
}
EXPORT_SYMBOL(dib9000_remove_slave_frontend);
/*
 * Return the frontend at @slave_index (0 = master), or NULL when the
 * index is out of range.
 */
struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index)
{
	struct dib9000_state *state = fe->demodulator_priv;

	/* Robustness fix: also reject negative indices (OOB read before) */
	if (slave_index < 0 || slave_index >= MAX_NUMBER_OF_FRONTENDS)
		return NULL;
	return state->fe[slave_index];
}
EXPORT_SYMBOL(dib9000_get_slave_frontend);
static struct dvb_frontend_ops dib9000_ops;
/*
 * Allocate state + frontend for a DiB9000 at @i2c_addr on @i2c_adap,
 * probe the chip, and register the firmware-backed tuner and
 * component-bus I2C adapters. Returns the new frontend, or NULL on error.
 */
struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg)
{
	struct dvb_frontend *fe;
	struct dib9000_state *st;

	st = kzalloc(sizeof(struct dib9000_state), GFP_KERNEL);
	if (st == NULL)
		return NULL;
	fe = kzalloc(sizeof(struct dvb_frontend), GFP_KERNEL);
	if (fe == NULL) {
		kfree(st);
		return NULL;
	}

	memcpy(&st->chip.d9.cfg, cfg, sizeof(struct dib9000_config));
	st->i2c.i2c_adap = i2c_adap;
	st->i2c.i2c_addr = i2c_addr;
	/* the I2C scratch buffers live inside the state itself */
	st->i2c.i2c_write_buffer = st->i2c_write_buffer;
	st->i2c.i2c_read_buffer = st->i2c_read_buffer;

	st->gpio_dir = DIB9000_GPIO_DEFAULT_DIRECTIONS;
	st->gpio_val = DIB9000_GPIO_DEFAULT_VALUES;
	st->gpio_pwm_pos = DIB9000_GPIO_DEFAULT_PWM_POS;

	DibInitLock(&st->platform.risc.mbx_if_lock);
	DibInitLock(&st->platform.risc.mbx_lock);
	DibInitLock(&st->platform.risc.mem_lock);
	DibInitLock(&st->platform.risc.mem_mbx_lock);
	DibInitLock(&st->demod_lock);
	st->get_frontend_internal = 0;

	st->pid_ctrl_index = -2;	/* -2: no tune running, PID cmds go straight out */

	st->fe[0] = fe;
	fe->demodulator_priv = st;
	memcpy(&st->fe[0]->ops, &dib9000_ops, sizeof(struct dvb_frontend_ops));

	/* Ensure the output mode remains at the previous default if it's
	 * not specifically set by the caller.
	 */
	if ((st->chip.d9.cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (st->chip.d9.cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK))
		st->chip.d9.cfg.output_mode = OUTMODE_MPEG2_FIFO;

	if (dib9000_identify(&st->i2c) == 0)
		goto error;
	dibx000_init_i2c_master(&st->i2c_master, DIB7000MC, st->i2c.i2c_adap, st->i2c.i2c_addr);

	st->tuner_adap.dev.parent = i2c_adap->dev.parent;
	strncpy(st->tuner_adap.name, "DIB9000_FW TUNER ACCESS", sizeof(st->tuner_adap.name));
	st->tuner_adap.algo = &dib9000_tuner_algo;
	st->tuner_adap.algo_data = NULL;
	i2c_set_adapdata(&st->tuner_adap, st);
	if (i2c_add_adapter(&st->tuner_adap) < 0)
		goto error;

	st->component_bus.dev.parent = i2c_adap->dev.parent;
	strncpy(st->component_bus.name, "DIB9000_FW COMPONENT BUS ACCESS", sizeof(st->component_bus.name));
	st->component_bus.algo = &dib9000_component_bus_algo;
	st->component_bus.algo_data = NULL;
	st->component_bus_speed = 340;
	i2c_set_adapdata(&st->component_bus, st);
	if (i2c_add_adapter(&st->component_bus) < 0)
		goto component_bus_add_error;

	dib9000_fw_reset(fe);

	return fe;

component_bus_add_error:
	i2c_del_adapter(&st->tuner_adap);
error:
	/* NOTE(review): 'fe' (st->fe[0]) appears to be leaked on these error
	 * paths — only 'st' is freed. Confirm and free fe here if so. */
	kfree(st);
	return NULL;
}
EXPORT_SYMBOL(dib9000_attach);
/* DVB frontend callbacks for the DiBcom 9000 OFDM demodulator */
static struct dvb_frontend_ops dib9000_ops = {
	.info = {
		 .name = "DiBcom 9000",
		 .type = FE_OFDM,
		 .frequency_min = 44250000,	/* Hz */
		 .frequency_max = 867250000,
		 .frequency_stepsize = 62500,
		 .caps = FE_CAN_INVERSION_AUTO |
		 FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
		 FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
		 FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
		 FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO,
		 },

	.release = dib9000_release,

	.init = dib9000_wakeup,
	.sleep = dib9000_sleep,

	.set_frontend = dib9000_set_frontend,
	.get_tune_settings = dib9000_fe_get_tune_settings,
	.get_frontend = dib9000_get_frontend,

	.read_status = dib9000_read_status,
	.read_ber = dib9000_read_ber,
	.read_signal_strength = dib9000_read_signal_strength,
	.read_snr = dib9000_read_snr,
	.read_ucblocks = dib9000_read_unc_blocks,
};
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
MODULE_DESCRIPTION("Driver for the DiBcom 9000 COFDM demodulator");
MODULE_LICENSE("GPL");
| gpl-2.0 |
zhaochengw/android_kernel_blackview_p1-pro | arch/arm/mach-s3c64xx/mach-ncp.c | 2203 | 2530 | /*
* linux/arch/arm/mach-s3c64xx/mach-ncp.c
*
* Copyright (C) 2008-2009 Samsung Electronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <video/platform_lcd.h>
#include <video/samsung_fimd.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <plat/fb.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/cpu.h>
#include <plat/samsung-time.h>
#include "common.h"
#define UCON S3C2410_UCON_DEFAULT
#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE
#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
/* UART configurations for on-chip ports 0-2; all use the default
 * UCON/ULCON/UFCON settings defined above. */
static struct s3c2410_uartcfg ncp_uartcfgs[] __initdata = {
	/* REVISIT: NCP uses only serial 1, 2 */
	[0] = {
		.hwport = 0,
		.flags = 0,
		.ucon = UCON,
		.ulcon = ULCON,
		.ufcon = UFCON,
	},
	[1] = {
		.hwport = 1,
		.flags = 0,
		.ucon = UCON,
		.ulcon = ULCON,
		.ufcon = UFCON,
	},
	[2] = {
		.hwport = 2,
		.flags = 0,
		.ucon = UCON,
		.ulcon = ULCON,
		.ufcon = UFCON,
	},
};
/* Platform devices registered by this board: HSMMC channel 1 and I2C 0 */
static struct platform_device *ncp_devices[] __initdata = {
	&s3c_device_hsmmc1,
	&s3c_device_i2c0,
};

/* no board-specific static I/O mappings are required */
static struct map_desc ncp_iodesc[] __initdata = {};
/* Early map_io hook: static mappings, clocks (12 MHz base), UARTs, and
 * the PWM channels used as the system timer source. */
static void __init ncp_map_io(void)
{
	s3c64xx_init_io(ncp_iodesc, ARRAY_SIZE(ncp_iodesc));
	s3c24xx_init_clocks(12000000);
	s3c24xx_init_uarts(ncp_uartcfgs, ARRAY_SIZE(ncp_uartcfgs));
	samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4);
}
/* Board init: default I2C0 platform data, then register the devices */
static void __init ncp_machine_init(void)
{
	s3c_i2c0_set_platdata(NULL);

	platform_add_devices(ncp_devices, ARRAY_SIZE(ncp_devices));
}
/* Machine descriptor for the Samsung NCP board (S3C6410 SoC) */
MACHINE_START(NCP, "NCP")
	/* Maintainer: Samsung Electronics */
	.atag_offset = 0x100,
	.init_irq = s3c6410_init_irq,
	.map_io = ncp_map_io,
	.init_machine = ncp_machine_init,
	.init_late = s3c64xx_init_late,
	.init_time = samsung_timer_init,
	.restart = s3c64xx_restart,
MACHINE_END
| gpl-2.0 |
CyanogenMod/lge-kernel-sniper | drivers/acpi/acpica/rscalc.c | 3227 | 18229 | /*******************************************************************************
*
* Module Name: rscalc - Calculate stream and list lengths
*
******************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rscalc")
/* Local prototypes */
static u8 acpi_rs_count_set_bits(u16 bit_field);
static acpi_rs_length
acpi_rs_struct_option_length(struct acpi_resource_source *resource_source);
static u32
acpi_rs_stream_option_length(u32 resource_length, u32 minimum_total_length);
/*******************************************************************************
*
* FUNCTION: acpi_rs_count_set_bits
*
* PARAMETERS: bit_field - Field in which to count bits
*
* RETURN: Number of bits set within the field
*
* DESCRIPTION: Count the number of bits set in a resource field. Used for
* (Short descriptor) interrupt and DMA lists.
*
******************************************************************************/
static u8 acpi_rs_count_set_bits(u16 bit_field)
{
	u8 count = 0;

	ACPI_FUNCTION_ENTRY();

	while (bit_field) {
		/* Clear the lowest set bit (bit_field & (bit_field - 1)) */
		bit_field &= (u16) (bit_field - 1);
		count++;
	}

	return count;
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_struct_option_length
*
* PARAMETERS: resource_source - Pointer to optional descriptor field
*
* RETURN: Status
*
* DESCRIPTION: Common code to handle optional resource_source_index and
* resource_source fields in some Large descriptors. Used during
* list-to-stream conversion
*
******************************************************************************/
static acpi_rs_length
acpi_rs_struct_option_length(struct acpi_resource_source *resource_source)
{
	ACPI_FUNCTION_ENTRY();
	/*
	 * A NULL string pointer means the optional resource_source is
	 * absent and contributes nothing to the AML stream length.
	 */
	if (!resource_source->string_ptr) {
		return (0);
	}
	/*
	 * Size of the string (string_length already includes the NULL
	 * terminator) plus one byte for the resource_source_index.
	 */
	return ((acpi_rs_length) (resource_source->string_length + 1));
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_stream_option_length
*
* PARAMETERS: resource_length - Length from the resource header
* minimum_total_length - Minimum length of this resource, before
* any optional fields. Includes header size
*
* RETURN: Length of optional string (0 if no string present)
*
* DESCRIPTION: Common code to handle optional resource_source_index and
* resource_source fields in some Large descriptors. Used during
* stream-to-list conversion
*
******************************************************************************/
static u32
acpi_rs_stream_option_length(u32 resource_length,
			     u32 minimum_aml_resource_length)
{
	u32 string_length;
	ACPI_FUNCTION_ENTRY();
	/*
	 * The resource_source_index and resource_source are optional
	 * elements of some Large-type resource descriptors.
	 *
	 * If the actual descriptor is longer than the ACPI spec-defined
	 * minimum, a resource_source_index byte is present, followed by a
	 * (required) null terminated string. The string length (including
	 * the null terminator) is then the resource length minus the
	 * minimum length, minus one byte for the resource_source_index
	 * itself. Otherwise there is no optional string at all.
	 */
	string_length = (resource_length > minimum_aml_resource_length) ?
	    (resource_length - minimum_aml_resource_length - 1) : 0;
	/*
	 * Round up to a multiple of the native word so the entire
	 * internal resource descriptor stays native word aligned
	 */
	return ((u32) ACPI_ROUND_UP_TO_NATIVE_WORD(string_length));
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_get_aml_length
*
* PARAMETERS: Resource - Pointer to the resource linked list
* size_needed - Where the required size is returned
*
* RETURN: Status
*
* DESCRIPTION: Takes a linked list of internal resource descriptors and
* calculates the size buffer needed to hold the corresponding
* external resource byte stream.
*
******************************************************************************/
acpi_status
acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
{
	acpi_size aml_size_needed = 0;	/* Running total, returned via size_needed */
	acpi_rs_length total_size;	/* AML size of the current descriptor */
	ACPI_FUNCTION_TRACE(rs_get_aml_length);
	/* Traverse entire list of internal resource descriptors */
	while (resource) {
		/* Validate the descriptor type */
		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
			return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
		}
		/* Get the base size of the (external stream) resource descriptor */
		total_size = acpi_gbl_aml_resource_sizes[resource->type];
		/*
		 * Augment the base size for descriptors with optional and/or
		 * variable-length fields
		 */
		switch (resource->type) {
		case ACPI_RESOURCE_TYPE_IRQ:
			/* Length can be 3 or 2 (2 if the optional flags byte is absent) */
			if (resource->data.irq.descriptor_length == 2) {
				total_size--;
			}
			break;
		case ACPI_RESOURCE_TYPE_START_DEPENDENT:
			/* Length can be 1 or 0 */
			/*
			 * NOTE(review): descriptor_length is read through the
			 * 'irq' member of the resource data union even though
			 * this is a Start Dependent descriptor - presumably
			 * the field offsets coincide within the union; confirm
			 * against union acpi_resource_data.
			 */
			if (resource->data.irq.descriptor_length == 0) {
				total_size--;
			}
			break;
		case ACPI_RESOURCE_TYPE_VENDOR:
			/*
			 * Vendor Defined Resource:
			 * For a Vendor Specific resource, if the Length is between 1 and 7
			 * it will be created as a Small Resource data type, otherwise it
			 * is a Large Resource data type.
			 */
			if (resource->data.vendor.byte_length > 7) {
				/* Base size of a Large resource descriptor */
				total_size =
				    sizeof(struct aml_resource_large_header);
			}
			/* Add the size of the vendor-specific data */
			total_size = (acpi_rs_length)
			    (total_size + resource->data.vendor.byte_length);
			break;
		case ACPI_RESOURCE_TYPE_END_TAG:
			/*
			 * End Tag:
			 * We are done -- return the accumulated total size.
			 * (This is the only success exit from the loop.)
			 */
			*size_needed = aml_size_needed + total_size;
			/* Normal exit */
			return_ACPI_STATUS(AE_OK);
		case ACPI_RESOURCE_TYPE_ADDRESS16:
			/*
			 * 16-Bit Address Resource:
			 * Add the size of the optional resource_source info
			 */
			total_size = (acpi_rs_length)
			    (total_size +
			     acpi_rs_struct_option_length(&resource->data.
							  address16.
							  resource_source));
			break;
		case ACPI_RESOURCE_TYPE_ADDRESS32:
			/*
			 * 32-Bit Address Resource:
			 * Add the size of the optional resource_source info
			 */
			total_size = (acpi_rs_length)
			    (total_size +
			     acpi_rs_struct_option_length(&resource->data.
							  address32.
							  resource_source));
			break;
		case ACPI_RESOURCE_TYPE_ADDRESS64:
			/*
			 * 64-Bit Address Resource:
			 * Add the size of the optional resource_source info
			 */
			total_size = (acpi_rs_length)
			    (total_size +
			     acpi_rs_struct_option_length(&resource->data.
							  address64.
							  resource_source));
			break;
		case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
			/*
			 * Extended IRQ Resource:
			 * Add the size of each additional optional interrupt beyond the
			 * required 1 (4 bytes for each u32 interrupt number)
			 */
			total_size = (acpi_rs_length)
			    (total_size +
			     ((resource->data.extended_irq.interrupt_count -
			       1) * 4) +
			     /* Add the size of the optional resource_source info */
			     acpi_rs_struct_option_length(&resource->data.
							  extended_irq.
							  resource_source));
			break;
		default:
			/* All other types need only the fixed base size */
			break;
		}
		/* Update the total */
		aml_size_needed += total_size;
		/* Point to the next object (internal descriptors are variable length) */
		resource =
		    ACPI_ADD_PTR(struct acpi_resource, resource,
				 resource->length);
	}
	/* Did not find an end_tag resource descriptor */
	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_get_list_length
*
* PARAMETERS: aml_buffer - Pointer to the resource byte stream
* aml_buffer_length - Size of aml_buffer
* size_needed - Where the size needed is returned
*
* RETURN: Status
*
* DESCRIPTION: Takes an external resource byte stream and calculates the size
* buffer needed to hold the corresponding internal resource
* descriptor linked list.
*
******************************************************************************/
acpi_status
acpi_rs_get_list_length(u8 * aml_buffer,
			u32 aml_buffer_length, acpi_size * size_needed)
{
	acpi_status status;
	u8 *end_aml;		/* First byte past the AML buffer */
	u8 *buffer;		/* Body of the current descriptor (past its header) */
	u32 buffer_size;	/* Internal struct size for the current descriptor */
	u16 temp16;
	u16 resource_length;
	u32 extra_struct_bytes;	/* Variable-length part of the internal struct */
	u8 resource_index;
	u8 minimum_aml_resource_length;
	ACPI_FUNCTION_TRACE(rs_get_list_length);
	*size_needed = 0;
	end_aml = aml_buffer + aml_buffer_length;
	/* Walk the list of AML resource descriptors */
	while (aml_buffer < end_aml) {
		/*
		 * Validate the Resource Type and Resource Length.
		 * On success, resource_index is a valid index into the
		 * acpi_gbl_resource_aml_sizes / _struct_sizes tables.
		 */
		status = acpi_ut_validate_resource(aml_buffer, &resource_index);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
		/* Get the resource length and base (minimum) AML size */
		resource_length = acpi_ut_get_resource_length(aml_buffer);
		minimum_aml_resource_length =
		    acpi_gbl_resource_aml_sizes[resource_index];
		/*
		 * Augment the size for descriptors with optional
		 * and/or variable length fields
		 */
		extra_struct_bytes = 0;
		buffer =
		    aml_buffer + acpi_ut_get_resource_header_length(aml_buffer);
		switch (acpi_ut_get_resource_type(aml_buffer)) {
		case ACPI_RESOURCE_NAME_IRQ:
			/*
			 * IRQ Resource:
			 * Get the number of bits set in the 16-bit IRQ mask
			 * (one internal interrupt entry per set bit)
			 */
			ACPI_MOVE_16_TO_16(&temp16, buffer);
			extra_struct_bytes = acpi_rs_count_set_bits(temp16);
			break;
		case ACPI_RESOURCE_NAME_DMA:
			/*
			 * DMA Resource:
			 * Get the number of bits set in the 8-bit DMA mask
			 */
			extra_struct_bytes = acpi_rs_count_set_bits(*buffer);
			break;
		case ACPI_RESOURCE_NAME_VENDOR_SMALL:
		case ACPI_RESOURCE_NAME_VENDOR_LARGE:
			/*
			 * Vendor Resource:
			 * Get the number of vendor data bytes
			 */
			extra_struct_bytes = resource_length;
			break;
		case ACPI_RESOURCE_NAME_END_TAG:
			/*
			 * End Tag:
			 * This is the normal exit, add size of end_tag
			 */
			*size_needed += ACPI_RS_SIZE_MIN;
			return_ACPI_STATUS(AE_OK);
		case ACPI_RESOURCE_NAME_ADDRESS32:
		case ACPI_RESOURCE_NAME_ADDRESS16:
		case ACPI_RESOURCE_NAME_ADDRESS64:
			/*
			 * Address Resource:
			 * Add the size of the optional resource_source
			 */
			extra_struct_bytes =
			    acpi_rs_stream_option_length(resource_length,
							 minimum_aml_resource_length);
			break;
		case ACPI_RESOURCE_NAME_EXTENDED_IRQ:
			/*
			 * Extended IRQ Resource:
			 * Using the interrupt_table_length (second byte of the
			 * descriptor body), add 4 bytes for each additional
			 * interrupt. Note: at least one interrupt is required and is
			 * included in the minimum descriptor size (reason for the -1)
			 */
			extra_struct_bytes = (buffer[1] - 1) * sizeof(u32);
			/* Add the size of the optional resource_source */
			extra_struct_bytes +=
			    acpi_rs_stream_option_length(resource_length -
							 extra_struct_bytes,
							 minimum_aml_resource_length);
			break;
		default:
			/* All other descriptors have a fixed internal size */
			break;
		}
		/*
		 * Update the required buffer size for the internal descriptor structs
		 *
		 * Important: Round the size up for the appropriate alignment. This
		 * is a requirement on IA64.
		 */
		buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
		    extra_struct_bytes;
		buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
		*size_needed += buffer_size;
		ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
				  "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
				  acpi_ut_get_resource_type(aml_buffer),
				  acpi_ut_get_descriptor_length(aml_buffer),
				  buffer_size));
		/*
		 * Point to the next resource within the AML stream using the length
		 * contained in the resource descriptor header
		 */
		aml_buffer += acpi_ut_get_descriptor_length(aml_buffer);
	}
	/* Did not find an end_tag resource descriptor */
	return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/*******************************************************************************
*
* FUNCTION: acpi_rs_get_pci_routing_table_length
*
* PARAMETERS: package_object - Pointer to the package object
* buffer_size_needed - u32 pointer of the size buffer
* needed to properly return the
* parsed data
*
* RETURN: Status
*
* DESCRIPTION: Given a package representing a PCI routing table, this
* calculates the size of the corresponding linked list of
* descriptions.
*
******************************************************************************/
acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
				     acpi_size * buffer_size_needed)
{
	u32 number_of_elements;
	acpi_size temp_size_needed = 0;
	union acpi_operand_object **top_object_list;
	u32 index;
	union acpi_operand_object *package_element;
	union acpi_operand_object **sub_object_list;
	u8 name_found;
	u32 table_index;
	ACPI_FUNCTION_TRACE(rs_get_pci_routing_table_length);
	number_of_elements = package_object->package.count;
	/*
	 * Calculate the size of the return buffer.
	 * The base size is the number of elements * the sizes of the
	 * structures. Additional space for the strings is added below.
	 * The minus one is to subtract the size of the u8 Source[1]
	 * member because it is added below.
	 *
	 * But each PRT_ENTRY structure has a pointer to a string and
	 * the size of that string must be found.
	 */
	top_object_list = package_object->package.elements;
	for (index = 0; index < number_of_elements; index++) {
		/* Dereference the sub-package */
		package_element = *top_object_list;
		/* We must have a valid Package object */
		if (!package_element ||
		    (package_element->common.type != ACPI_TYPE_PACKAGE)) {
			return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
		}
		/*
		 * The sub_object_list will now point to an array of the
		 * four IRQ elements: Address, Pin, Source and source_index
		 */
		sub_object_list = package_element->package.elements;
		/* Scan the irq_table_elements for the Source Name String */
		name_found = FALSE;
		for (table_index = 0; table_index < 4 && !name_found;
		     table_index++) {
			if (*sub_object_list &&	/* Null object allowed */
			    ((ACPI_TYPE_STRING ==
			      (*sub_object_list)->common.type) ||
			     ((ACPI_TYPE_LOCAL_REFERENCE ==
			       (*sub_object_list)->common.type) &&
			      ((*sub_object_list)->reference.class ==
			       ACPI_REFCLASS_NAME)))) {
				name_found = TRUE;
			} else {
				/* Look at the next element */
				sub_object_list++;
			}
		}
		/*
		 * NOTE(review): the "-4" here subtracts the trailing Source
		 * member of struct acpi_pci_routing_table, whose actual size
		 * (string, pathname, or u32 zero) is added below. The block
		 * comment above still says "Source[1]" - presumably stale;
		 * confirm against the struct definition.
		 */
		temp_size_needed += (sizeof(struct acpi_pci_routing_table) - 4);
		/* Was a String type found? */
		if (name_found) {
			if ((*sub_object_list)->common.type == ACPI_TYPE_STRING) {
				/*
				 * The length String.Length field does not include the
				 * terminating NULL, add 1
				 */
				temp_size_needed += ((acpi_size)
						     (*sub_object_list)->string.
						     length + 1);
			} else {
				/* Reference: size of the full namespace pathname */
				temp_size_needed +=
				    acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
			}
		} else {
			/*
			 * If no name was found, then this is a NULL, which is
			 * translated as a u32 zero.
			 */
			temp_size_needed += sizeof(u32);
		}
		/* Round up the size since each element must be aligned */
		temp_size_needed = ACPI_ROUND_UP_TO_64BIT(temp_size_needed);
		/* Point to the next union acpi_operand_object */
		top_object_list++;
	}
	/*
	 * Add an extra element to the end of the list, essentially a
	 * NULL terminator
	 */
	*buffer_size_needed =
	    temp_size_needed + sizeof(struct acpi_pci_routing_table);
	return_ACPI_STATUS(AE_OK);
}
| gpl-2.0 |
huangyuxuan01/kernel_samsung_smdk4210 | drivers/acpi/acpica/exutils.c | 3227 | 13571 |
/******************************************************************************
*
* Module Name: exutils - interpreter/scanner utilities
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
/*
* DEFINE_AML_GLOBALS is tested in amlcode.h
* to determine whether certain global names should be "defined" or only
* "declared" in the current compilation. This enhances maintainability
* by enabling a single header file to embody all knowledge of the names
* in question.
*
* Exactly one module of any executable should #define DEFINE_GLOBALS
* before #including the header files which use this convention. The
* names in question will be defined and initialized in that module,
* and declared as extern in all other modules which #include those
* header files.
*/
#define DEFINE_AML_GLOBALS
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exutils")
/* Local prototypes */
static u32 acpi_ex_digits_needed(u64 value, u32 base);
#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
* FUNCTION: acpi_ex_enter_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Enter the interpreter execution region. Failure to enter
* the interpreter region is a fatal system error. Used in
* conjunction with exit_interpreter.
*
******************************************************************************/
void acpi_ex_enter_interpreter(void)
{
	ACPI_FUNCTION_TRACE(ex_enter_interpreter);
	/*
	 * Take the interpreter mutex. Failure to enter the interpreter
	 * region is a fatal system error, so the error is logged but
	 * execution continues.
	 */
	if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER))) {
		ACPI_ERROR((AE_INFO,
			    "Could not acquire AML Interpreter mutex"));
	}
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_reacquire_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Reacquire the interpreter execution region from within the
* interpreter code. Failure to enter the interpreter region is a
* fatal system error. Used in conjunction with
* relinquish_interpreter
*
******************************************************************************/
void acpi_ex_reacquire_interpreter(void)
{
	ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
	/*
	 * When all methods are serialized, acpi_ex_relinquish_interpreter
	 * never actually released the mutex, so there is nothing to
	 * retake here. This keeps the interpreter single threaded.
	 */
	if (acpi_gbl_all_methods_serialized) {
		return_VOID;
	}
	acpi_ex_enter_interpreter();
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_exit_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Exit the interpreter execution region. This is the top level
* routine used to exit the interpreter when all processing has
* been completed.
*
******************************************************************************/
void acpi_ex_exit_interpreter(void)
{
	ACPI_FUNCTION_TRACE(ex_exit_interpreter);
	/*
	 * Release the interpreter mutex; this is the top level exit
	 * used once all processing for the region has completed.
	 */
	if (ACPI_FAILURE(acpi_ut_release_mutex(ACPI_MTX_INTERPRETER))) {
		ACPI_ERROR((AE_INFO,
			    "Could not release AML Interpreter mutex"));
	}
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_relinquish_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Exit the interpreter execution region, from within the
* interpreter - before attempting an operation that will possibly
* block the running thread.
*
* Cases where the interpreter is unlocked internally
* 1) Method to be blocked on a Sleep() AML opcode
* 2) Method to be blocked on an Acquire() AML opcode
* 3) Method to be blocked on a Wait() AML opcode
* 4) Method to be blocked to acquire the global lock
* 5) Method to be blocked waiting to execute a serialized control method
* that is currently executing
* 6) About to invoke a user-installed opregion handler
*
******************************************************************************/
void acpi_ex_relinquish_interpreter(void)
{
	ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
	/*
	 * Keep holding the mutex when methods are globally serialized;
	 * this forces the interpreter to remain single threaded.
	 */
	if (acpi_gbl_all_methods_serialized) {
		return_VOID;
	}
	/* Drop the interpreter lock before a possibly-blocking operation */
	acpi_ex_exit_interpreter();
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_truncate_for32bit_table
*
* PARAMETERS: obj_desc - Object to be truncated
*
* RETURN: none
*
* DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
* 32-bit, as determined by the revision of the DSDT.
*
******************************************************************************/
void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
{
	ACPI_FUNCTION_ENTRY();
	/*
	 * Only Integer operand objects are candidates for truncation.
	 * The NS node may be passed here for AML_INT_NAMEPATH_OP, so
	 * each precondition is checked separately (same short-circuit
	 * order as a combined test).
	 */
	if (!obj_desc) {
		return;
	}
	if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
		return;
	}
	if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
		return;
	}
	if (acpi_gbl_integer_byte_width == 4) {
		/*
		 * The executing method came from a 32-bit ACPI table;
		 * zero out the upper 32 bits of the integer value.
		 */
		obj_desc->integer.value &= (u64) ACPI_UINT32_MAX;
	}
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_acquire_global_lock
*
* PARAMETERS: field_flags - Flags with Lock rule:
* always_lock or never_lock
*
* RETURN: None
*
* DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field
* flags specifiy that it is to be obtained before field access.
*
******************************************************************************/
void acpi_ex_acquire_global_lock(u32 field_flags)
{
	acpi_status lock_status;
	ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
	/* The Global Lock is only taken when the field's Lock rule asks for it */
	if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
		return_VOID;
	}
	/* Attempt to get the global lock, waiting forever if necessary */
	lock_status = acpi_ex_acquire_mutex_object(ACPI_WAIT_FOREVER,
						   acpi_gbl_global_lock_mutex,
						   acpi_os_get_thread_id());
	if (ACPI_FAILURE(lock_status)) {
		ACPI_EXCEPTION((AE_INFO, lock_status,
				"Could not acquire Global Lock"));
	}
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_release_global_lock
*
* PARAMETERS: field_flags - Flags with Lock rule:
* always_lock or never_lock
*
* RETURN: None
*
* DESCRIPTION: Release the ACPI hardware Global Lock
*
******************************************************************************/
void acpi_ex_release_global_lock(u32 field_flags)
{
	acpi_status unlock_status;
	ACPI_FUNCTION_TRACE(ex_release_global_lock);
	/* Nothing was taken unless the field's Lock rule asked for it */
	if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
		return_VOID;
	}
	/* Release the global lock */
	unlock_status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
	if (ACPI_FAILURE(unlock_status)) {
		/* Report the error, but there isn't much else we can do */
		ACPI_EXCEPTION((AE_INFO, unlock_status,
				"Could not release Global Lock"));
	}
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_digits_needed
*
* PARAMETERS: Value - Value to be represented
* Base - Base of representation
*
* RETURN: The number of digits.
*
* DESCRIPTION: Calculate the number of digits needed to represent the Value
* in the given Base (Radix)
*
******************************************************************************/
static u32 acpi_ex_digits_needed(u64 value, u32 base)
{
	u32 num_digits = 0;
	u64 remaining;
	ACPI_FUNCTION_TRACE(ex_digits_needed);
	/* u64 is unsigned, so there is never a '-' prefix to account for */
	if (value == 0) {
		return_UINT32(1);
	}
	/* One digit for every division by the radix until nothing remains */
	for (remaining = value; remaining; num_digits++) {
		(void)acpi_ut_short_divide(remaining, base, &remaining, NULL);
	}
	return_UINT32(num_digits);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_eisa_id_to_string
*
* PARAMETERS: compressed_id - EISAID to be converted
* out_string - Where to put the converted string (8 bytes)
*
* RETURN: None
*
* DESCRIPTION: Convert a numeric EISAID to string representation. Return
* buffer must be large enough to hold the string. The string
* returned is always exactly of length ACPI_EISAID_STRING_SIZE
* (includes null terminator). The EISAID is always 32 bits.
*
******************************************************************************/
void acpi_ex_eisa_id_to_string(char *out_string, u64 compressed_id)
{
	u32 swapped_id;
	ACPI_FUNCTION_ENTRY();
	/* The EISAID should be a 32-bit integer; warn and truncate otherwise */
	if (compressed_id > ACPI_UINT32_MAX) {
		ACPI_WARNING((AE_INFO,
			      "Expected EISAID is larger than 32 bits: 0x%8.8X%8.8X, truncating",
			      ACPI_FORMAT_UINT64(compressed_id)));
	}
	/* Swap ID to big-endian to get contiguous bits */
	swapped_id = acpi_ut_dword_byte_swap((u32)compressed_id);
	/*
	 * First 3 bytes are uppercase letters. Next 4 bytes are hexadecimal.
	 * Each letter is a 5-bit field (1-26); adding 0x40 ('@') maps it to
	 * 'A'-'Z'. The low 16 bits are emitted as 4 hex digits, one nibble
	 * at a time.
	 */
	out_string[0] =
	    (char)(0x40 + (((unsigned long)swapped_id >> 26) & 0x1F));
	out_string[1] = (char)(0x40 + ((swapped_id >> 21) & 0x1F));
	out_string[2] = (char)(0x40 + ((swapped_id >> 16) & 0x1F));
	out_string[3] = acpi_ut_hex_to_ascii_char((u64) swapped_id, 12);
	out_string[4] = acpi_ut_hex_to_ascii_char((u64) swapped_id, 8);
	out_string[5] = acpi_ut_hex_to_ascii_char((u64) swapped_id, 4);
	out_string[6] = acpi_ut_hex_to_ascii_char((u64) swapped_id, 0);
	/* Result is always exactly ACPI_EISAID_STRING_SIZE incl. terminator */
	out_string[7] = 0;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_integer_to_string
*
* PARAMETERS: out_string - Where to put the converted string. At least
* 21 bytes are needed to hold the largest
* possible 64-bit integer.
* Value - Value to be converted
*
* RETURN: None, string
*
* DESCRIPTION: Convert a 64-bit integer to decimal string representation.
* Assumes string buffer is large enough to hold the string. The
* largest string is (ACPI_MAX64_DECIMAL_DIGITS + 1).
*
******************************************************************************/
void acpi_ex_integer_to_string(char *out_string, u64 value)
{
	u32 position;
	u32 digit_count;
	u32 remainder;
	ACPI_FUNCTION_ENTRY();
	/* Caller guarantees the buffer can hold every decimal digit + NULL */
	digit_count = acpi_ex_digits_needed(value, 10);
	out_string[digit_count] = 0;
	/* Fill the string right-to-left, one division by 10 per digit */
	for (position = digit_count; position > 0; position--) {
		(void)acpi_ut_short_divide(value, 10, &value, &remainder);
		out_string[position - 1] = (char)('0' + remainder);
	}
}
#endif
| gpl-2.0 |
infected-lp/kernel_sony_msm8974 | sound/soc/codecs/wm8737.c | 4507 | 19796 | /*
* wm8737.c -- WM8737 ALSA SoC Audio driver
*
* Copyright 2010 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8737.h"
#define WM8737_NUM_SUPPLIES 4
/* Names of the supply rails requested via the regulator framework */
static const char *wm8737_supply_names[WM8737_NUM_SUPPLIES] = {
	"DCVDD",
	"DBVDD",
	"AVDD",
	"MVDD",
};
/* codec private data */
struct wm8737_priv {
	/* Bus used for register I/O (presumably I2C or SPI; both drivers
	 * are included above) */
	enum snd_soc_control_type control_type;
	/* Bulk regulator handles for the supplies named above */
	struct regulator_bulk_data supplies[WM8737_NUM_SUPPLIES];
	/* Cached MCLK rate - assumed Hz, set elsewhere in the driver;
	 * TODO confirm against the set_sysclk implementation */
	unsigned int mclk;
};
/* Register default (reset) values, indexed by register address R0-R14 */
static const u16 wm8737_reg[WM8737_REGISTER_COUNT] = {
	0x00C3,		/* R0  - Left PGA volume */
	0x00C3,		/* R1  - Right PGA volume */
	0x0007,		/* R2  - AUDIO path L */
	0x0007,		/* R3  - AUDIO path R */
	0x0000,		/* R4  - 3D Enhance */
	0x0000,		/* R5  - ADC Control */
	0x0000,		/* R6  - Power Management */
	0x000A,		/* R7  - Audio Format */
	0x0000,		/* R8  - Clocking */
	0x000F,		/* R9  - MIC Preamp Control */
	0x0003,		/* R10 - Misc Bias Control */
	0x0000,		/* R11 - Noise Gate */
	0x007C,		/* R12 - ALC1 */
	0x0000,		/* R13 - ALC2 */
	0x0032,		/* R14 - ALC3 */
};
/* Any write to the reset register returns the codec to its register defaults */
static int wm8737_reset(struct snd_soc_codec *codec)
{
	return snd_soc_write(codec, WM8737_RESET, 0);
}
/* Mic boost gain steps: 13, 18, 28 or 33 dB (values in 0.01 dB units) */
static const unsigned int micboost_tlv[] = {
	TLV_DB_RANGE_HEAD(4),
	0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0),
	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
	2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0),
	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0),
};
/* PGA: -97.50 dB minimum in 0.5 dB steps, lowest value mutes */
static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1);
/* 3D ADC volume: -6 dB or 0 dB */
static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0);
/* Noise gate threshold: -78 dB minimum in 6 dB steps */
static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0);
/* ALC maximum gain: -12 dB minimum in 6 dB steps */
static const DECLARE_TLV_DB_SCALE(alc_max_tlv, -1200, 600, 0);
/* ALC target level: -18 dB minimum in 1 dB steps */
static const DECLARE_TLV_DB_SCALE(alc_target_tlv, -1800, 100, 0);
/* Mic preamp bias selection, as a fraction of full bias */
static const char *micbias_enum_text[] = {
	"25%",
	"50%",
	"75%",
	"100%",
};
static const struct soc_enum micbias_enum =
	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 0, 4, micbias_enum_text);
/* 3D enhance low cut-off selector (3D Enhance register, bit 6) */
static const char *low_cutoff_text[] = {
	"Low", "High"
};
static const struct soc_enum low_3d =
	SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 6, 2, low_cutoff_text);
/* 3D enhance high cut-off selector (3D Enhance register, bit 5) */
static const char *high_cutoff_text[] = {
	"High", "Low"
};
static const struct soc_enum high_3d =
	SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 5, 2, high_cutoff_text);
/* ALC channel selection */
static const char *alc_fn_text[] = {
	"Disabled", "Right", "Left", "Stereo"
};
static const struct soc_enum alc_fn =
	SOC_ENUM_SINGLE(WM8737_ALC1, 7, 4, alc_fn_text);
/* ALC hold time steps */
static const char *alc_hold_text[] = {
	"0", "2.67ms", "5.33ms", "10.66ms", "21.32ms", "42.64ms", "85.28ms",
	"170.56ms", "341.12ms", "682.24ms", "1.364s", "2.728s", "5.458s",
	"10.916s", "21.832s", "43.691s"
};
static const struct soc_enum alc_hold =
	SOC_ENUM_SINGLE(WM8737_ALC2, 0, 16, alc_hold_text);
/* ALC attack time steps */
static const char *alc_atk_text[] = {
	"8.4ms", "16.8ms", "33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms",
	"1.075s", "2.15s", "4.3s", "8.6s"
};
static const struct soc_enum alc_atk =
	SOC_ENUM_SINGLE(WM8737_ALC3, 0, 11, alc_atk_text);
/* ALC decay time steps */
static const char *alc_dcy_text[] = {
	"33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms", "1.075s", "2.15s",
	"4.3s", "8.6s", "17.2s", "34.41s"
};
static const struct soc_enum alc_dcy =
	SOC_ENUM_SINGLE(WM8737_ALC3, 4, 11, alc_dcy_text);
static const struct snd_kcontrol_new wm8737_snd_controls[] = {
SOC_DOUBLE_R_TLV("Mic Boost Volume", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
6, 3, 0, micboost_tlv),
SOC_DOUBLE_R("Mic Boost Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
4, 1, 0),
SOC_DOUBLE("Mic ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
3, 1, 0),
SOC_DOUBLE_R_TLV("Capture Volume", WM8737_LEFT_PGA_VOLUME,
WM8737_RIGHT_PGA_VOLUME, 0, 255, 0, pga_tlv),
SOC_DOUBLE("Capture ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
2, 1, 0),
SOC_DOUBLE("INPUT1 DC Bias Switch", WM8737_MISC_BIAS_CONTROL, 0, 1, 1, 0),
SOC_ENUM("Mic PGA Bias", micbias_enum),
SOC_SINGLE("ADC Low Power Switch", WM8737_ADC_CONTROL, 2, 1, 0),
SOC_SINGLE("High Pass Filter Switch", WM8737_ADC_CONTROL, 0, 1, 1),
SOC_DOUBLE("Polarity Invert Switch", WM8737_ADC_CONTROL, 5, 6, 1, 0),
SOC_SINGLE("3D Switch", WM8737_3D_ENHANCE, 0, 1, 0),
SOC_SINGLE("3D Depth", WM8737_3D_ENHANCE, 1, 15, 0),
SOC_ENUM("3D Low Cut-off", low_3d),
SOC_ENUM("3D High Cut-off", low_3d),
SOC_SINGLE_TLV("3D ADC Volume", WM8737_3D_ENHANCE, 7, 1, 1, adc_tlv),
SOC_SINGLE("Noise Gate Switch", WM8737_NOISE_GATE, 0, 1, 0),
SOC_SINGLE_TLV("Noise Gate Threshold Volume", WM8737_NOISE_GATE, 2, 7, 0,
ng_tlv),
SOC_ENUM("ALC", alc_fn),
SOC_SINGLE_TLV("ALC Max Gain Volume", WM8737_ALC1, 4, 7, 0, alc_max_tlv),
SOC_SINGLE_TLV("ALC Target Volume", WM8737_ALC1, 0, 15, 0, alc_target_tlv),
SOC_ENUM("ALC Hold Time", alc_hold),
SOC_SINGLE("ALC ZC Switch", WM8737_ALC2, 4, 1, 0),
SOC_ENUM("ALC Attack Time", alc_atk),
SOC_ENUM("ALC Decay Time", alc_dcy),
};
static const char *linsel_text[] = {
"LINPUT1", "LINPUT2", "LINPUT3", "LINPUT1 DC",
};
static const struct soc_enum linsel_enum =
SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_L, 7, 4, linsel_text);
static const struct snd_kcontrol_new linsel_mux =
SOC_DAPM_ENUM("LINSEL", linsel_enum);
static const char *rinsel_text[] = {
"RINPUT1", "RINPUT2", "RINPUT3", "RINPUT1 DC",
};
static const struct soc_enum rinsel_enum =
SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_R, 7, 4, rinsel_text);
static const struct snd_kcontrol_new rinsel_mux =
SOC_DAPM_ENUM("RINSEL", rinsel_enum);
static const char *bypass_text[] = {
"Direct", "Preamp"
};
static const struct soc_enum lbypass_enum =
SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 2, 2, bypass_text);
static const struct snd_kcontrol_new lbypass_mux =
SOC_DAPM_ENUM("Left Bypass", lbypass_enum);
static const struct soc_enum rbypass_enum =
SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 3, 2, bypass_text);
static const struct snd_kcontrol_new rbypass_mux =
SOC_DAPM_ENUM("Left Bypass", rbypass_enum);
/* DAPM widgets: physical inputs, the two input muxes, the preamp-bypass
 * muxes, the input PGAs, both ADCs and the capture AIF.  Power bits live
 * in WM8737_POWER_MANAGEMENT where a register/shift is given. */
static const struct snd_soc_dapm_widget wm8737_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("LINPUT1"),
SND_SOC_DAPM_INPUT("LINPUT2"),
SND_SOC_DAPM_INPUT("LINPUT3"),
SND_SOC_DAPM_INPUT("RINPUT1"),
SND_SOC_DAPM_INPUT("RINPUT2"),
SND_SOC_DAPM_INPUT("RINPUT3"),
SND_SOC_DAPM_INPUT("LACIN"),
SND_SOC_DAPM_INPUT("RACIN"),

SND_SOC_DAPM_MUX("LINSEL", SND_SOC_NOPM, 0, 0, &linsel_mux),
SND_SOC_DAPM_MUX("RINSEL", SND_SOC_NOPM, 0, 0, &rinsel_mux),

SND_SOC_DAPM_MUX("Left Preamp Mux", SND_SOC_NOPM, 0, 0, &lbypass_mux),
SND_SOC_DAPM_MUX("Right Preamp Mux", SND_SOC_NOPM, 0, 0, &rbypass_mux),

SND_SOC_DAPM_PGA("PGAL", WM8737_POWER_MANAGEMENT, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("PGAR", WM8737_POWER_MANAGEMENT, 4, 0, NULL, 0),

/* NOTE(review): declared with SND_SOC_DAPM_DAC although these are the
 * capture ADCs - presumably only the power-bit handling matters here */
SND_SOC_DAPM_DAC("ADCL", NULL, WM8737_POWER_MANAGEMENT, 3, 0),
SND_SOC_DAPM_DAC("ADCR", NULL, WM8737_POWER_MANAGEMENT, 2, 0),

SND_SOC_DAPM_AIF_OUT("AIF", "Capture", 0, WM8737_POWER_MANAGEMENT, 6, 0),
};
/* Audio routing map: inputs -> input mux -> preamp/bypass mux ->
 * PGA -> ADC -> capture interface, per channel. */
static const struct snd_soc_dapm_route intercon[] = {
	{ "LINSEL", "LINPUT1", "LINPUT1" },
	{ "LINSEL", "LINPUT2", "LINPUT2" },
	{ "LINSEL", "LINPUT3", "LINPUT3" },
	{ "LINSEL", "LINPUT1 DC", "LINPUT1" },

	{ "RINSEL", "RINPUT1", "RINPUT1" },
	{ "RINSEL", "RINPUT2", "RINPUT2" },
	{ "RINSEL", "RINPUT3", "RINPUT3" },
	{ "RINSEL", "RINPUT1 DC", "RINPUT1" },

	{ "Left Preamp Mux", "Preamp", "LINSEL" },
	{ "Left Preamp Mux", "Direct", "LACIN" },

	{ "Right Preamp Mux", "Preamp", "RINSEL" },
	{ "Right Preamp Mux", "Direct", "RACIN" },

	{ "PGAL", NULL, "Left Preamp Mux" },
	{ "PGAR", NULL, "Right Preamp Mux" },

	{ "ADCL", NULL, "PGAL" },
	{ "ADCR", NULL, "PGAR" },

	{ "AIF", NULL, "ADCL" },
	{ "AIF", NULL, "ADCR" },
};
/* Register the DAPM widgets and routing table with the codec's DAPM
 * context.  Always succeeds. */
static int wm8737_add_widgets(struct snd_soc_codec *codec)
{
	snd_soc_dapm_new_controls(&codec->dapm, wm8737_dapm_widgets,
				  ARRAY_SIZE(wm8737_dapm_widgets));
	snd_soc_dapm_add_routes(&codec->dapm, intercon, ARRAY_SIZE(intercon));

	return 0;
}
/* codec mclk clock divider coefficients: for each supported MCLK/sample
 * rate pair, the USB-mode flag and the SR field value to program into the
 * clocking register.  Values presumably follow the WM8737 datasheet
 * sample-rate table - TODO confirm against the datasheet. */
static const struct {
	u32 mclk;	/* master clock in Hz */
	u32 rate;	/* audio sample rate in Hz */
	u8 usb;		/* non-zero selects USB clocking mode */
	u8 sr;		/* SR register field value */
} coeff_div[] = {
	{ 12288000,  8000, 0,  0x4 },
	{ 12288000, 12000, 0,  0x8 },
	{ 12288000, 16000, 0,  0xa },
	{ 12288000, 24000, 0, 0x1c },
	{ 12288000, 32000, 0,  0xc },
	{ 12288000, 48000, 0,    0 },
	{ 12288000, 96000, 0,  0xe },

	{ 11289600,  8000, 0, 0x14 },
	{ 11289600, 11025, 0, 0x18 },
	{ 11289600, 22050, 0, 0x1a },
	{ 11289600, 44100, 0, 0x10 },
	{ 11289600, 88200, 0, 0x1e },

	{ 18432000,  8000, 0,  0x5 },
	{ 18432000, 12000, 0,  0x9 },
	{ 18432000, 16000, 0,  0xb },
	{ 18432000, 24000, 0, 0x1b },
	{ 18432000, 32000, 0,  0xd },
	{ 18432000, 48000, 0,  0x1 },
	{ 18432000, 96000, 0, 0x1f },

	{ 16934400,  8000, 0, 0x15 },
	{ 16934400, 11025, 0, 0x19 },
	{ 16934400, 22050, 0, 0x1b },
	{ 16934400, 44100, 0, 0x11 },
	{ 16934400, 88200, 0, 0x1f },

	{ 12000000,  8000, 1,  0x4 },
	{ 12000000, 11025, 1, 0x19 },
	{ 12000000, 12000, 1,  0x8 },
	{ 12000000, 16000, 1,  0xa },
	{ 12000000, 22050, 1, 0x1b },
	{ 12000000, 24000, 1, 0x1c },
	{ 12000000, 32000, 1,  0xc },
	{ 12000000, 44100, 1, 0x11 },
	{ 12000000, 48000, 1,  0x0 },
	{ 12000000, 88200, 1, 0x1f },
	{ 12000000, 96000, 1,  0xe },
};
/* Configure sample rate and word length for a capture stream.
 * Looks up the clocking coefficients for the rate against the MCLK set
 * earlier via set_sysclk (the raw MCLK or half of it, enabling CLKDIV2),
 * then programs the word length and clocking registers.
 * Returns 0 on success or -EINVAL for unsupported rate/format. */
static int wm8737_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_codec *codec = rtd->codec;
	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
	u16 clocking = 0;
	u16 af = 0;
	int idx;

	/* Find a table entry for this rate reachable from our MCLK */
	for (idx = 0; idx < ARRAY_SIZE(coeff_div); idx++) {
		if (coeff_div[idx].rate != params_rate(params))
			continue;

		if (coeff_div[idx].mclk == wm8737->mclk)
			break;

		if (coeff_div[idx].mclk == wm8737->mclk * 2) {
			/* MCLK is double the table value; divide it down */
			clocking |= WM8737_CLKDIV2;
			break;
		}
	}

	if (idx == ARRAY_SIZE(coeff_div)) {
		dev_err(codec->dev, "%dHz MCLK can't support %dHz\n",
			wm8737->mclk, params_rate(params));
		return -EINVAL;
	}

	clocking |= coeff_div[idx].usb | (coeff_div[idx].sr << WM8737_SR_SHIFT);

	/* Word length field; S16 is the register default (0) */
	if (params_format(params) == SNDRV_PCM_FORMAT_S20_3LE)
		af |= 0x8;
	else if (params_format(params) == SNDRV_PCM_FORMAT_S24_LE)
		af |= 0x10;
	else if (params_format(params) == SNDRV_PCM_FORMAT_S32_LE)
		af |= 0x18;
	else if (params_format(params) != SNDRV_PCM_FORMAT_S16_LE)
		return -EINVAL;

	snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT, WM8737_WL_MASK, af);
	snd_soc_update_bits(codec, WM8737_CLOCKING,
			    WM8737_USB_MODE | WM8737_CLKDIV2 | WM8737_SR_MASK,
			    clocking);

	return 0;
}
/* Record the MCLK rate supplied by the machine driver.  Any frequency
 * equal to a table MCLK, or double one (CLKDIV2 handles the rest at
 * hw_params time), is accepted.  Returns -EINVAL otherwise. */
static int wm8737_set_dai_sysclk(struct snd_soc_dai *codec_dai,
				 int clk_id, unsigned int freq, int dir)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(coeff_div); idx++) {
		unsigned int mclk = coeff_div[idx].mclk;

		if (freq == mclk || freq == mclk * 2) {
			wm8737->mclk = freq;
			return 0;
		}
	}

	dev_err(codec->dev, "MCLK rate %dHz not supported\n", freq);

	return -EINVAL;
}
/* Program the digital interface format: master/slave role, data format
 * (I2S, left/right justified, DSP A/B) and LRCLK polarity.  Rejects any
 * combination the hardware cannot express with -EINVAL. */
static int wm8737_set_dai_fmt(struct snd_soc_dai *codec_dai,
			      unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	unsigned int master = fmt & SND_SOC_DAIFMT_MASTER_MASK;
	unsigned int format = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
	unsigned int inv = fmt & SND_SOC_DAIFMT_INV_MASK;
	u16 af = 0;

	/* Clock/frame master role */
	if (master == SND_SOC_DAIFMT_CBM_CFM)
		af |= WM8737_MS;
	else if (master != SND_SOC_DAIFMT_CBS_CFS)
		return -EINVAL;

	/* Data format; right justified is the register default (0) */
	if (format == SND_SOC_DAIFMT_I2S)
		af |= 0x2;
	else if (format == SND_SOC_DAIFMT_LEFT_J)
		af |= 0x1;
	else if (format == SND_SOC_DAIFMT_DSP_A)
		af |= 0x3;
	else if (format == SND_SOC_DAIFMT_DSP_B)
		af |= 0x13;
	else if (format != SND_SOC_DAIFMT_RIGHT_J)
		return -EINVAL;

	/* Only normal BCLK is supported; LRCLK may be inverted */
	if (inv == SND_SOC_DAIFMT_NB_IF)
		af |= WM8737_LRP;
	else if (inv != SND_SOC_DAIFMT_NB_NF)
		return -EINVAL;

	snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT,
			    WM8737_FORMAT_MASK | WM8737_LRP | WM8737_MS, af);

	return 0;
}
/* Manage codec power/bias transitions.  The VMID divider selection and
 * the order of register writes follow the part's power-up sequence, so
 * the write ordering here is significant. */
static int wm8737_set_bias_level(struct snd_soc_codec *codec,
				 enum snd_soc_bias_level level)
{
	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
	int ret;

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;

	case SND_SOC_BIAS_PREPARE:
		/* VMID at 2*75k */
		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
				    WM8737_VMIDSEL_MASK, 0);
		break;

	case SND_SOC_BIAS_STANDBY:
		/* Coming up from fully off: power the supplies, restore
		 * the register cache, then ramp VMID quickly. */
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
			ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
						    wm8737->supplies);
			if (ret != 0) {
				dev_err(codec->dev,
					"Failed to enable supplies: %d\n",
					ret);
				return ret;
			}

			snd_soc_cache_sync(codec);

			/* Fast VMID ramp at 2*2.5k */
			snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
					    WM8737_VMIDSEL_MASK, 0x4);

			/* Bring VMID up */
			snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
					    WM8737_VMID_MASK |
					    WM8737_VREF_MASK,
					    WM8737_VMID_MASK |
					    WM8737_VREF_MASK);

			/* Allow VMID to settle before reducing drive */
			msleep(500);
		}

		/* VMID at 2*300k */
		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
				    WM8737_VMIDSEL_MASK, 2);
		break;

	case SND_SOC_BIAS_OFF:
		/* Drop VMID/VREF, then cut the regulators */
		snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
				    WM8737_VMID_MASK | WM8737_VREF_MASK, 0);

		regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies),
				       wm8737->supplies);
		break;
	}

	codec->dapm.bias_level = level;

	return 0;
}
/* Rates/formats advertised to ALSA; must stay in sync with coeff_div
 * and the word-length handling in wm8737_hw_params(). */
#define WM8737_RATES SNDRV_PCM_RATE_8000_96000

#define WM8737_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)

static const struct snd_soc_dai_ops wm8737_dai_ops = {
	.hw_params	= wm8737_hw_params,
	.set_sysclk	= wm8737_set_dai_sysclk,
	.set_fmt	= wm8737_set_dai_fmt,
};

/* Capture-only DAI: the WM8737 is a stereo ADC with no playback path */
static struct snd_soc_dai_driver wm8737_dai = {
	.name = "wm8737",
	.capture = {
		.stream_name = "Capture",
		.channels_min = 2,	/* Mono modes not yet supported */
		.channels_max = 2,
		.rates = WM8737_RATES,
		.formats = WM8737_FORMATS,
	},
	.ops = &wm8737_dai_ops,
};
#ifdef CONFIG_PM
/* System suspend: fully power off (also disables the regulators) */
static int wm8737_suspend(struct snd_soc_codec *codec)
{
	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
	return 0;
}

/* System resume: back to standby; registers resync from the cache */
static int wm8737_resume(struct snd_soc_codec *codec)
{
	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
	return 0;
}
#else
#define wm8737_suspend NULL
#define wm8737_resume NULL
#endif
/* Codec-level probe: set up register I/O, acquire and enable the supply
 * regulators, reset the chip, latch volume-update bits and register the
 * controls/widgets.  On failure the regulators are released in reverse
 * order of acquisition. */
static int wm8737_probe(struct snd_soc_codec *codec)
{
	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
	int ret, i;

	/* 7-bit register addresses, 9-bit values, over I2C or SPI */
	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8737->control_type);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(wm8737->supplies); i++)
		wm8737->supplies[i].supply = wm8737_supply_names[i];

	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8737->supplies),
				 wm8737->supplies);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
				    wm8737->supplies);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
		goto err_get;
	}

	ret = wm8737_reset(codec);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to issue reset\n");
		goto err_enable;
	}

	/* Latch volume updates so L/R gains change simultaneously */
	snd_soc_update_bits(codec, WM8737_LEFT_PGA_VOLUME, WM8737_LVU,
			    WM8737_LVU);
	snd_soc_update_bits(codec, WM8737_RIGHT_PGA_VOLUME, WM8737_RVU,
			    WM8737_RVU);

	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	/* Bias level configuration will have done an extra enable */
	regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);

	snd_soc_add_codec_controls(codec, wm8737_snd_controls,
			     ARRAY_SIZE(wm8737_snd_controls));
	wm8737_add_widgets(codec);

	return 0;

err_enable:
	regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
err_get:
	regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);

	return ret;
}
/* Codec-level remove: power down fully and release the regulator
 * references taken in wm8737_probe(). */
static int wm8737_remove(struct snd_soc_codec *codec)
{
	struct wm8737_priv *priv = snd_soc_codec_get_drvdata(codec);

	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
	regulator_bulk_free(ARRAY_SIZE(priv->supplies), priv->supplies);

	return 0;
}
/* ASoC codec driver descriptor; the register cache skips the (write-only)
 * reset register at index 0. */
static struct snd_soc_codec_driver soc_codec_dev_wm8737 = {
	.probe		= wm8737_probe,
	.remove		= wm8737_remove,
	.suspend	= wm8737_suspend,
	.resume		= wm8737_resume,
	.set_bias_level = wm8737_set_bias_level,

	.reg_cache_size = WM8737_REGISTER_COUNT - 1, /* Skip reset */
	.reg_word_size	= sizeof(u16),
	.reg_cache_default = wm8737_reg,
};

/* Device tree match table, shared by the I2C and SPI bus drivers */
static const struct of_device_id wm8737_of_match[] = {
	{ .compatible = "wlf,wm8737", },
	{ }
};
MODULE_DEVICE_TABLE(of, wm8737_of_match);
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/* I2C bus glue: allocate the private data and register the codec.
 * The allocation is freed here only on registration failure; otherwise
 * wm8737_i2c_remove() frees it. */
static __devinit int wm8737_i2c_probe(struct i2c_client *i2c,
				      const struct i2c_device_id *id)
{
	struct wm8737_priv *wm8737;
	int ret;

	wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL);
	if (wm8737 == NULL)
		return -ENOMEM;

	i2c_set_clientdata(i2c, wm8737);
	wm8737->control_type = SND_SOC_I2C;

	ret =  snd_soc_register_codec(&i2c->dev,
			&soc_codec_dev_wm8737, &wm8737_dai, 1);
	if (ret < 0)
		kfree(wm8737);
	return ret;
}

static __devexit int wm8737_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	kfree(i2c_get_clientdata(client));
	return 0;
}

static const struct i2c_device_id wm8737_i2c_id[] = {
	{ "wm8737", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8737_i2c_id);

static struct i2c_driver wm8737_i2c_driver = {
	.driver = {
		.name = "wm8737",
		.owner = THIS_MODULE,
		.of_match_table = wm8737_of_match,
	},
	.probe =    wm8737_i2c_probe,
	.remove =   __devexit_p(wm8737_i2c_remove),
	.id_table = wm8737_i2c_id,
};
#endif
#if defined(CONFIG_SPI_MASTER)
/* SPI bus glue, mirroring the I2C path: allocate private data and
 * register the codec; free on registration failure or at remove. */
static int __devinit wm8737_spi_probe(struct spi_device *spi)
{
	struct wm8737_priv *wm8737;
	int ret;

	wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL);
	if (wm8737 == NULL)
		return -ENOMEM;

	wm8737->control_type = SND_SOC_SPI;
	spi_set_drvdata(spi, wm8737);

	ret = snd_soc_register_codec(&spi->dev,
			&soc_codec_dev_wm8737, &wm8737_dai, 1);
	if (ret < 0)
		kfree(wm8737);
	return ret;
}

static int __devexit wm8737_spi_remove(struct spi_device *spi)
{
	snd_soc_unregister_codec(&spi->dev);
	kfree(spi_get_drvdata(spi));
	return 0;
}

static struct spi_driver wm8737_spi_driver = {
	.driver = {
		.name	= "wm8737",
		.owner	= THIS_MODULE,
		.of_match_table = wm8737_of_match,
	},
	.probe		= wm8737_spi_probe,
	.remove		= __devexit_p(wm8737_spi_remove),
};
#endif /* CONFIG_SPI_MASTER */
/* Module init: register whichever bus drivers are configured.
 * NOTE(review): registration failures are only logged and 0 is always
 * returned, so the module loads even if one bus registration fails -
 * confirm this best-effort behaviour is intended before changing it. */
static int __init wm8737_modinit(void)
{
	int ret;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&wm8737_i2c_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8737 I2C driver: %d\n",
		       ret);
	}
#endif
#if defined(CONFIG_SPI_MASTER)
	ret = spi_register_driver(&wm8737_spi_driver);
	if (ret != 0) {
		printk(KERN_ERR "Failed to register WM8737 SPI driver: %d\n",
		       ret);
	}
#endif
	return 0;
}
module_init(wm8737_modinit);
/* Module exit: unregister the bus drivers registered in wm8737_modinit() */
static void __exit wm8737_exit(void)
{
#if defined(CONFIG_SPI_MASTER)
	spi_unregister_driver(&wm8737_spi_driver);
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&wm8737_i2c_driver);
#endif
}
module_exit(wm8737_exit);

MODULE_DESCRIPTION("ASoC WM8737 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
GearCM/android_kernel_samsung_exynos5410 | drivers/usb/gadget/goku_udc.c | 4763 | 47813 | /*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
* Copyright (C) 2000-2002 Lineo
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software (source@mvista.com)
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
/*
* This device has ep0 and three semi-configurable bulk/interrupt endpoints.
*
* - Endpoint numbering is fixed: ep{1,2,3}-bulk
* - Gadget drivers can choose ep maxpacket (8/16/32/64)
* - Gadget drivers can choose direction (IN, OUT)
* - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
*/
// #define VERBOSE /* extra debug messages (success too) */
// #define USB_TRACE /* packet-level success messages */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include "goku_udc.h"
#define	DRIVER_DESC		"TC86C001 USB Device Controller"
#define	DRIVER_VERSION		"30-Oct 2003"

/* sentinel meaning "no DMA mapping set up for this request" */
#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;

MODULE_AUTHOR("source@mvista.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");


/*
 * IN dma behaves ok under testing, though the IN-dma abort paths don't
 * seem to behave quite as expected.  Used by default.
 *
 * OUT dma documents design problems handling the common "short packet"
 * transfer termination policy; it couldn't be enabled by default, even
 * if the OUT-dma abort problems had a resolution.
 */
static unsigned use_dma = 1;

#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
 *	0 to disable dma
 *	1 to use IN dma only (normal operation)
 *	2 to use IN and OUT dma
 */
module_param(use_dma, uint, S_IRUGO);
#endif

/*-------------------------------------------------------------------------*/

/* forward declaration: dequeues every request on an endpoint */
static void nuke(struct goku_ep *, int status);
/*
 * Issue one of the COMMAND_* opcodes to the controller for the given
 * endpoint, then wait for the hardware to settle.
 * (Fixes mojibake: "&regs" had been corrupted to a registered-trademark
 * character by HTML entity decoding.)
 */
static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
	writel(COMMAND_EP(epnum) | command, &regs->Command);
	udelay(300);
}
/*
 * usb_ep_ops.enable: configure one of the three bulk/interrupt endpoints
 * from its descriptor.  Chooses direction, maxpacket (8/16/32/64), and
 * whether the endpoint uses DMA (IN on ep2, optionally OUT on ep1) or PIO
 * with single/double buffering.  Restores "&regs" accesses corrupted by
 * HTML entity decoding.
 */
static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct goku_udc	*dev;
	struct goku_ep	*ep;
	u32		mode;
	u16		max;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !desc || ep->desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (ep == &dev->ep[0])
		return -EINVAL;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->num != usb_endpoint_num(desc))
		return -EINVAL;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	default:
		return -EINVAL;
	}

	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
			!= EPxSTATUS_EP_INVALID)
		return -EBUSY;

	/* enabling the no-toggle interrupt mode would need an api hook */
	mode = 0;
	max = get_unaligned_le16(&desc->wMaxPacketSize);
	/* encode log2(max/8) in bits 3..4 via deliberate fallthrough */
	switch (max) {
	case 64:	mode++;		/* fall through */
	case 32:	mode++;		/* fall through */
	case 16:	mode++;		/* fall through */
	case 8:		mode <<= 3;
			break;
	default:
		return -EINVAL;
	}
	mode |= 2 << 1;		/* bulk, or intr-with-toggle */

	/* ep1/ep2 dma direction is chosen early; it works in the other
	 * direction, with pio.  be cautious with out-dma.
	 */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (ep->is_in) {
		mode |= 1;
		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
	} else {
		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
		if (ep->dma)
			DBG(dev, "%s out-dma hides short packets\n",
				ep->ep.name);
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* ep1 and ep2 can do double buffering and/or dma */
	if (ep->num < 3) {
		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
		u32				tmp;

		/* double buffer except (for now) with pio in */
		tmp = ((ep->dma || !ep->is_in)
				? 0x10		/* double buffered */
				: 0x11		/* single buffer */
			) << ep->num;
		tmp |= readl(&regs->EPxSingle);
		writel(tmp, &regs->EPxSingle);

		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
		tmp |= readl(&regs->EPxBCS);
		writel(tmp, &regs->EPxBCS);
	}
	writel(mode, ep->reg_mode);
	command(ep->dev->regs, COMMAND_RESET, ep->num);
	ep->ep.maxpacket = max;
	ep->stopped = 0;
	ep->desc = desc;
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		max);

	return 0;
}
/*
 * Return an endpoint to its unconfigured state: invalidate it, mask its
 * interrupts, clear its buffering/DMA mode bits and reset any DMA engine
 * still attached to it.  A NULL regs skips the hardware writes (used when
 * the device is gone).  Restores "&regs" accesses corrupted by HTML
 * entity decoding.
 */
static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
	struct goku_udc		*dev = ep->dev;

	if (regs) {
		command(regs, COMMAND_INVALID, ep->num);
		if (ep->num) {
			if (ep->num == UDC_MSTWR_ENDPOINT)
				dev->int_enable &= ~(INT_MSTWREND
							|INT_MSTWRTMOUT);
			else if (ep->num == UDC_MSTRD_ENDPOINT)
				dev->int_enable &= ~INT_MSTRDEND;
			dev->int_enable &= ~INT_EPxDATASET (ep->num);
		} else
			dev->int_enable &= ~INT_EP0;
		writel(dev->int_enable, &regs->int_enable);
		readl(&regs->int_enable);	/* flush posted write */
		if (ep->num < 3) {
			struct goku_udc_regs __iomem	*r = ep->dev->regs;
			u32		tmp;

			tmp = readl(&r->EPxSingle);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxSingle);

			tmp = readl(&r->EPxBCS);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxBCS);
		}
		/* reset dma in case we're still using it */
		if (ep->dma) {
			u32	master;

			master = readl(&regs->dma_master) & MST_RW_BITS;
			if (ep->num == UDC_MSTWR_ENDPOINT) {
				master &= ~MST_W_BITS;
				master |= MST_WR_RESET;
			} else {
				master &= ~MST_R_BITS;
				master |= MST_RD_RESET;
			}
			writel(master, &regs->dma_master);
		}
	}

	ep->ep.maxpacket = MAX_FIFO_SIZE;
	ep->desc = NULL;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->irqs = 0;
	ep->dma = 0;
}
/* usb_ep_ops.disable: abort all queued requests and return the endpoint
 * hardware to its unconfigured state.  Refuses while suspended, since
 * registers can't be touched then. */
static int goku_ep_disable(struct usb_ep *_ep)
{
	struct goku_ep	*ep;
	struct goku_udc	*dev;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !ep->desc)
		return -ENODEV;
	dev = ep->dev;
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "disable %s\n", _ep->name);

	spin_lock_irqsave(&dev->lock, flags);
	nuke(ep, -ESHUTDOWN);
	ep_reset(dev->regs, ep);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*-------------------------------------------------------------------------*/
/* usb_ep_ops.alloc_request: allocate a zeroed request wrapper with no
 * DMA mapping and an empty queue link.  Returns NULL on failure. */
static struct usb_request *
goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct goku_request *request;

	if (_ep == NULL)
		return NULL;

	request = kzalloc(sizeof(*request), gfp_flags);
	if (request == NULL)
		return NULL;

	request->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&request->queue);
	return &request->req;
}
static void
goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
struct goku_request *req;
if (!_ep || !_req)
return;
req = container_of(_req, struct goku_request, req);
WARN_ON(!list_empty(&req->queue));
kfree(req);
}
/*-------------------------------------------------------------------------*/
/* Retire one request: unlink it, record its final status, undo any DMA
 * mapping, and invoke the gadget driver's completion callback.  The
 * device lock is dropped around the callback (which may requeue), and
 * ep->stopped blocks queue advance during it. */
static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
	struct goku_udc		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;

	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
/* Copy up to max bytes of the request's remaining buffer into the
 * endpoint FIFO, one byte per register write.  Advances req.actual and
 * returns the number of bytes written. */
static inline int
write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
{
	unsigned n, i;

	n = min(req->req.length - req->req.actual, max);
	req->req.actual += n;
	for (i = 0; i < n; i++)
		writel(buf[i], fifo);
	return n;
}
// return: 0 = still running, 1 = completed, negative = errno
/* Push the next packet of an IN request into the FIFO.  A short (or
 * zero-length) final packet is terminated with an EOP write; on ep0 that
 * also moves the control state machine to the status stage. */
static int write_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc	*dev = ep->dev;
	u32		tmp;
	u8		*buf;
	unsigned	count;
	int		is_last;

	tmp = readl(&dev->regs->DataSet);
	buf = req->req.buf + req->req.actual;
	prefetch(buf);
	dev = ep->dev;
	if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
		return -EL2HLT;

	/* NOTE:  just single-buffered PIO-IN for now.  */
	if (unlikely((tmp & DATASET_A(ep->num)) != 0))
		return 0;

	/* clear our "packet available" irq */
	if (ep->num != 0)
		writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);

	count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);

	/* last packet often short (sometimes a zlp, especially on ep0) */
	if (unlikely(count != ep->ep.maxpacket)) {
		writel(~(1<<ep->num), &dev->regs->EOP);
		if (ep->num == 0) {
			dev->ep[0].stopped = 1;
			dev->ep0state = EP0_STATUS;
		}
		is_last = 1;
	} else {
		if (likely(req->req.length != req->req.actual)
				|| req->req.zero)
			is_last = 0;
		else
			is_last = 1;
	}
#if 0		/* printk seemed to trash is_last...*/
//#ifdef USB_TRACE
	VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
		ep->ep.name, count, is_last ? "/last" : "",
		req->req.length - req->req.actual, req);
#endif

	/* requests complete when all IN data is in the FIFO,
	 * or sometimes later, if a zlp was needed.
	 */
	if (is_last) {
		done(ep, req, 0);
		return 1;
	}

	return 0;
}
/*
 * Drain OUT packets from the FIFO into the current request.  ep1/ep2 are
 * double-buffered, so the do/while loop can consume both buffers (and via
 * "goto top" even start filling the next queued request).  Returns 1 when
 * the request completed, 0 when more data is expected, negative errno on
 * a state error.  Restores "&regs" accesses corrupted by HTML entity
 * decoding.
 */
static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs;
	u32				size, set;
	u8				*buf;
	unsigned			bufferspace, is_short, dbuff;

	regs = ep->dev->regs;
top:
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
		return -EL2HLT;

	dbuff = (ep->num == 1 || ep->num == 2);
	do {
		/* ack dataset irq matching the status we'll handle */
		if (ep->num != 0)
			writel(~INT_EPxDATASET(ep->num), &regs->int_status);

		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
		size = readl(&regs->EPxSizeLA[ep->num]);
		bufferspace = req->req.length - req->req.actual;

		/* usually do nothing without an OUT packet */
		if (likely(ep->num != 0 || bufferspace != 0)) {
			if (unlikely(set == 0))
				break;
			/* use ep1/ep2 double-buffering for OUT */
			if (!(size & PACKET_ACTIVE))
				size = readl(&regs->EPxSizeLB[ep->num]);
			if (!(size & PACKET_ACTIVE))	/* "can't happen" */
				break;
			size &= DATASIZE;	/* EPxSizeH == 0 */

		/* ep0out no-out-data case for set_config, etc */
		} else
			size = 0;

		/* read all bytes from this packet */
		req->req.actual += size;
		is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
			ep->ep.name, size, is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
#endif
		while (likely(size-- != 0)) {
			u8	byte = (u8) readl(ep->reg_fifo);

			if (unlikely(bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data in this packet.
				 */
				if (req->req.status != -EOVERFLOW)
					DBG(ep->dev, "%s overflow %u\n",
						ep->ep.name, size);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}

		/* completion */
		if (unlikely(is_short || req->req.actual == req->req.length)) {
			if (unlikely(ep->num == 0)) {
				/* non-control endpoints now usable? */
				if (ep->dev->req_config)
					writel(ep->dev->configured
							? USBSTATE_CONFIGURED
							: 0,
						&regs->UsbState);
				/* ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				ep->stopped = 1;
				ep->dev->ep0state = EP0_STATUS;
			}
			done(ep, req, 0);

			/* empty the second buffer asap */
			if (dbuff && !list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct goku_request, queue);
				goto top;
			}
			return 1;
		}
	} while (dbuff);
	return 0;
}
/* Unmask the "packet available" (DATASET) interrupt for one endpoint.
 * Restores "&regs" corrupted by HTML entity decoding. */
static inline void
pio_irq_enable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable |= INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}

/* Mask the "packet available" (DATASET) interrupt for one endpoint. */
static inline void
pio_irq_disable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable &= ~INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}
/* Kick the PIO state machine: feed or drain the FIFO for the request at
 * the head of the endpoint queue, if any. */
static inline void
pio_advance(struct goku_ep *ep)
{
	struct goku_request *next;

	if (unlikely(list_empty(&ep->queue)))
		return;

	next = list_entry(ep->queue.next, struct goku_request, queue);
	if (ep->is_in)
		write_fifo(ep, next);
	else
		read_fifo(ep, next);
}
/*-------------------------------------------------------------------------*/
// return: 0 = q running, 1 = q stopped, negative = errno
/*
 * Program the master DMA engine for the request at the head of the queue.
 * IN transfers choose the end-of-packet behaviour based on whether a
 * short/zero-length terminating packet is needed; OUT transfers disable
 * the controller's nonstandard 3-SOF timeout.  Restores "&regs" accesses
 * corrupted by HTML entity decoding.
 */
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;
	u32				start = req->req.dma;
	u32				end = start + req->req.length - 1;

	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* re-init the bits affecting IN dma; careful with zlps */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA)) {
			DBG (ep->dev, "start, IN active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->in_dma_end);
		writel(start, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		if (unlikely(req->req.length == 0))
			master = MST_RD_ENA | MST_RD_EOPB;
		else if ((req->req.length % ep->ep.maxpacket) != 0
					|| req->req.zero)
			master = MST_RD_ENA | MST_EOPB_ENA;
		else
			master = MST_RD_ENA | MST_EOPB_DIS;

		ep->dev->int_enable |= INT_MSTRDEND;

	/* Goku DMA-OUT merges short packets, which plays poorly with
	 * protocols where short packets mark the transfer boundaries.
	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
	 * ending transfers after 3 SOFs; we don't turn it on.
	 */
	} else {
		if (unlikely(master & MST_WR_ENA)) {
			DBG (ep->dev, "start, OUT active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->out_dma_end);
		writel(start, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_ENA | MST_TIMEOUT_DIS;

		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
	}

	writel(master, &regs->dma_master);
	writel(ep->dev->int_enable, &regs->int_enable);
	return 0;
}
/*
 * DMA completion handler: if the engine has finished (enable bit clear),
 * compute the transferred length from the current-address register,
 * retire the request, and start DMA for the next queued request or mask
 * the DMA interrupts if the queue is empty.  Restores "&regs" accesses
 * corrupted by HTML entity decoding.
 */
static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
	struct goku_request		*req;
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;

	master = readl(&regs->dma_master);

	if (unlikely(list_empty(&ep->queue))) {
stop:
		if (ep->is_in)
			dev->int_enable &= ~INT_MSTRDEND;
		else
			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
		writel(dev->int_enable, &regs->int_enable);
		return;
	}
	req = list_entry(ep->queue.next, struct goku_request, queue);

	/* normal hw dma completion (not abort) */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA))
			return;
		req->req.actual = readl(&regs->in_dma_current);
	} else {
		if (unlikely(master & MST_WR_ENA))
			return;

		/* hardware merges short packets, and also hides packet
		 * overruns.  a partial packet MAY be in the fifo here.
		 */
		req->req.actual = readl(&regs->out_dma_current);
	}
	/* current address is the last byte written; length is inclusive */
	req->req.actual -= req->req.dma;
	req->req.actual++;

#ifdef USB_TRACE
	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
		ep->ep.name, ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length, req);
#endif
	done(ep, req, 0);
	if (list_empty(&ep->queue))
		goto stop;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(void) start_dma(ep, req);
}
/*
 * Abort an in-flight DMA transfer for the head request: disable the FIFO
 * so the host gets NAKed, reset the relevant DMA engine, record how much
 * was transferred plus the caller's status, then re-enable the FIFO.  If
 * the DMA had already completed, the request is marked fully done instead.
 * Restores "&regs" accesses corrupted by HTML entity decoding.
 */
static void abort_dma(struct goku_ep *ep, int status)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	struct goku_request		*req;
	u32				curr, master;

	/* NAK future host requests, hoping the implicit delay lets the
	 * dma engine finish reading (or writing) its latest packet and
	 * empty the dma buffer (up to 16 bytes).
	 *
	 * This avoids needing to clean up a partial packet in the fifo;
	 * we can't do that for IN without side effects to HALT and TOGGLE.
	 */
	command(regs, COMMAND_FIFO_DISABLE, ep->num);
	req = list_entry(ep->queue.next, struct goku_request, queue);

	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* FIXME using these resets isn't usably documented. this may
	 * not work unless it's followed by disabling the endpoint.
	 *
	 * FIXME the OUT reset path doesn't even behave consistently.
	 */
	if (ep->is_in) {
		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
			goto finished;
		curr = readl(&regs->in_dma_current);

		writel(curr, &regs->in_dma_end);
		writel(curr, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		master |= MST_RD_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_RD_ENA)
			DBG(ep->dev, "IN dma active after reset!\n");

	} else {
		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
			goto finished;
		curr = readl(&regs->out_dma_current);

		writel(curr, &regs->out_dma_end);
		writel(curr, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_RESET;
		writel(master, &regs->dma_master);

		if (readl(&regs->dma_master) & MST_WR_ENA)
			DBG(ep->dev, "OUT dma active after reset!\n");
	}
	req->req.actual = (curr - req->req.dma) + 1;
	req->req.status = status;

	VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length);

	command(regs, COMMAND_FIFO_ENABLE, ep->num);

	return;

finished:
	/* dma already completed; no abort needed */
	command(regs, COMMAND_FIFO_ENABLE, ep->num);
	req->req.actual = req->req.length;
	req->req.status = 0;
}
/*-------------------------------------------------------------------------*/

/* usb_ep_ops.queue: validate and enqueue a request.  If the queue is idle
 * the transfer is kicked off immediately (DMA or one FIFO pass); a request
 * that completes synchronously is not queued.  For PIO endpoints the
 * DATASET interrupt is enabled so the IRQ handler advances the queue. */
static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;
	int			status;

	/* always require a cpu-view buffer so pio works */
	req = container_of(_req, struct goku_request, req);
	if (unlikely(!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)))
		return -EINVAL;
	ep = container_of(_ep, struct goku_ep, ep);
	if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
		return -EINVAL;
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;

	/* can't touch registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		status = usb_gadget_map_request(&dev->gadget, &req->req,
				ep->is_in);
		if (status)
			return status;
	}

#ifdef USB_TRACE
	VDBG(dev, "%s queue req %p, len %u buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* for ep0 IN without premature status, zlp is required and
	 * writing EOP starts the status stage (OUT).
	 */
	if (unlikely(ep->num == 0 && ep->is_in))
		_req->zero = 1;

	/* kickstart this i/o queue? */
	status = 0;
	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
		/* dma:  done after dma completion IRQ (or error)
		 * pio:  done after last fifo operation
		 */
		if (ep->dma)
			status = start_dma(ep, req);
		else
			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);

		if (unlikely(status != 0)) {
			/* positive status means "already completed" */
			if (status > 0)
				status = 0;
			req = NULL;
		}

	} /* else pio or dma irq handler advances the queue. */

	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue))
			&& likely(ep->num != 0)
			&& !ep->dma
			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
		pio_irq_enable(dev, dev->regs, ep->num);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return status;
}
/* dequeue ALL requests */
/* Caller holds the device lock.  Aborts any in-flight DMA first, then
 * retires every queued request with the given status. */
static void nuke(struct goku_ep *ep, int status)
{
	struct goku_request	*req;

	ep->stopped = 1;
	if (list_empty(&ep->queue))
		return;
	if (ep->dma)
		abort_dma(ep, status);
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct goku_request, queue);
		done(ep, req, status);
	}
}
/* dequeue JUST ONE request */
/* usb_ep_ops.dequeue: cancel a single queued request with -ECONNRESET.
 * If it is the active head of a DMA endpoint, the DMA is aborted and the
 * queue restarted for the next request. */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !_req || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* we can't touch (dma) registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		_req);

	spin_lock_irqsave(&dev->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore (&dev->lock, flags);
		return -EINVAL;
	}

	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
		abort_dma(ep, -ECONNRESET);
		done(ep, req, -ECONNRESET);
		dma_advance(dev, ep);
	} else if (!list_empty(&req->queue))
		done(ep, req, -ECONNRESET);
	else
		req = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	return req ? 0 : -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/
/* Clear a halt/stall on a non-ep0 endpoint and restart any queued i/o.
 * Visible callers (goku_set_halt, ep0_setup) invoke this with
 * dev->lock held.
 */
static void goku_clear_halt(struct goku_ep *ep)
{
	// assert (ep->num !=0)
	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
	/* reset the data toggle before un-stalling */
	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);
	if (ep->stopped) {
		ep->stopped = 0;
		if (ep->dma) {
			struct goku_request *req;

			if (list_empty(&ep->queue))
				return;
			/* restart dma with the request at the queue head */
			req = list_entry(ep->queue.next, struct goku_request,
						queue);
			(void) start_dma(ep, req);
		} else
			pio_advance(ep);
	}
}
/* Set or clear an endpoint halt.
 *
 * For ep0 only setting a (protocol) stall is supported.  For other
 * endpoints a halt can't be set while requests are queued, or (IN)
 * while packet data still sits in either hardware packet buffer.
 */
static int goku_set_halt(struct usb_ep *_ep, int value)
{
	struct goku_ep *ep;
	unsigned long flags;
	int retval = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of (_ep, struct goku_ep, ep);

	if (ep->num == 0) {
		if (value) {
			ep->dev->ep0state = EP0_STALL;
			ep->dev->ep[0].stopped = 1;
		} else
			return -EINVAL;

	/* don't change EPxSTATUS_EP_INVALID to READY */
	} else if (!ep->desc) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value
			/* data in (either) packet buffer? */
			&& (readl(&ep->dev->regs->DataSet)
				& DATASET_AB(ep->num)))
		retval = -EAGAIN;
	else if (!value)
		goku_clear_halt(ep);
	else {
		ep->stopped = 1;
		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
		command(ep->dev->regs, COMMAND_STALL, ep->num);
		readl(ep->reg_status);	/* flush the posted write */
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return retval;
}
/*
 * Report how many bytes are waiting in an OUT endpoint's FIFO
 * (buffers A and B combined).
 *
 * Returns the byte count, -ENODEV without an endpoint, or -EOPNOTSUPP
 * for IN endpoints.  (Fixes mojibake: "&regs" had been corrupted to
 * a registered-sign character, which doesn't compile.)
 */
static int goku_fifo_status(struct usb_ep *_ep)
{
	struct goku_ep *ep;
	struct goku_udc_regs __iomem *regs;
	u32 size;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct goku_ep, ep);

	/* size is only reported sanely for OUT */
	if (ep->is_in)
		return -EOPNOTSUPP;

	/* ignores 16-byte dma buffer; SizeH == 0 */
	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
	VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);

	return size;
}
/*
 * Discard any data sitting in an endpoint's FIFO.  (Fixes mojibake:
 * "&regs" had been corrupted to a registered-sign character.)
 */
static void goku_fifo_flush(struct usb_ep *_ep)
{
	struct goku_ep *ep;
	struct goku_udc_regs __iomem *regs;
	u32 size;

	if (!_ep)
		return;
	ep = container_of(_ep, struct goku_ep, ep);
	VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);

	/* don't change EPxSTATUS_EP_INVALID to READY */
	if (!ep->desc && ep->num != 0) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return;
	}

	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]);
	size &= DATASIZE;

	/* Non-desirable behavior:  FIFO_CLEAR also clears the
	 * endpoint halt feature.  For OUT, we _could_ just read
	 * the bytes out (PIO, if !ep->dma); for in, no choice.
	 */
	if (size)
		command(regs, COMMAND_FIFO_CLEAR, ep->num);
}
/* per-endpoint operations, exported via each struct usb_ep */
static struct usb_ep_ops goku_ep_ops = {
	.enable = goku_ep_enable,
	.disable = goku_ep_disable,

	.alloc_request = goku_alloc_request,
	.free_request = goku_free_request,

	.queue = goku_queue,
	.dequeue = goku_dequeue,

	.set_halt = goku_set_halt,
	.fifo_status = goku_fifo_status,
	.fifo_flush = goku_fifo_flush,
};
/*-------------------------------------------------------------------------*/
/* frame number reporting is not implemented by this driver */
static int goku_get_frame(struct usb_gadget *_gadget)
{
	return -EOPNOTSUPP;
}
/* forward declarations so the ops table below can reference the
 * start/stop handlers defined further down
 */
static int goku_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int goku_stop(struct usb_gadget_driver *driver);

/* controller-wide gadget operations */
static const struct usb_gadget_ops goku_ops = {
	.get_frame = goku_get_frame,
	.start = goku_start,
	.stop = goku_stop,
	// no remote wakeup
	// not selfpowered
};
/*-------------------------------------------------------------------------*/
/* describe the module's dma usage policy, for log messages */
static inline char *dmastr(void)
{
	switch (use_dma) {
	case 0:
		return "(dma disabled)";
	case 2:
		return "(dma IN and OUT)";
	default:
		return "(dma IN)";
	}
}
#ifdef CONFIG_USB_GADGET_DEBUG_FILES

/* name of this driver's node under /proc */
static const char proc_node_name [] = "driver/udc";

/* format-string helpers: one "%s" per decoded flag bit */
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS

/* Append a human-readable decoding of an irq enable mask (or the
 * identically laid out int_status register) at *next, advancing *next
 * and shrinking *size by the number of characters written.
 */
static void
dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
{
	int t;

	/* int_status is the same format ... */
	t = scnprintf(*next, *size,
		"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
		label, mask,
		(mask & INT_PWRDETECT) ? " power" : "",
		(mask & INT_SYSERROR) ? " sys" : "",
		(mask & INT_MSTRDEND) ? " in-dma" : "",
		(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",
		(mask & INT_MSTWREND) ? " out-dma" : "",
		(mask & INT_MSTWRSET) ? " wrset" : "",
		(mask & INT_ERR) ? " err" : "",
		(mask & INT_SOF) ? " sof" : "",
		(mask & INT_EP3NAK) ? " ep3nak" : "",
		(mask & INT_EP2NAK) ? " ep2nak" : "",
		(mask & INT_EP1NAK) ? " ep1nak" : "",
		(mask & INT_EP3DATASET) ? " ep3" : "",
		(mask & INT_EP2DATASET) ? " ep2" : "",
		(mask & INT_EP1DATASET) ? " ep1" : "",
		(mask & INT_STATUSNAK) ? " ep0snak" : "",
		(mask & INT_STATUS) ? " ep0status" : "",
		(mask & INT_SETUP) ? " setup" : "",
		(mask & INT_ENDPOINT0) ? " ep0" : "",
		(mask & INT_USBRESET) ? " reset" : "",
		(mask & INT_SUSPEND) ? " suspend" : "");
	*size -= t;
	*next += t;
}
/*
 * Read handler for /proc/driver/udc: dump device, irq, dma and
 * per-endpoint state into the caller's buffer.  Single-shot read
 * (any nonzero offset returns 0).  (Fixes mojibake: every "&regs"
 * had been corrupted to a registered-sign character.)
 */
static int
udc_proc_read(char *buffer, char **start, off_t off, int count,
		int *eof, void *_dev)
{
	char *buf = buffer;
	struct goku_udc *dev = _dev;
	struct goku_udc_regs __iomem *regs = dev->regs;
	char *next = buf;
	unsigned size = count;
	unsigned long flags;
	int i, t, is_usb_connected;
	u32 tmp;

	if (off != 0)
		return 0;

	local_irq_save(flags);

	/* basic device status */
	tmp = readl(&regs->power_detect);
	is_usb_connected = tmp & PW_DETECT;
	t = scnprintf(next, size,
		"%s - %s\n"
		"%s version: %s %s\n"
		"Gadget driver: %s\n"
		"Host %s, %s\n"
		"\n",
		pci_name(dev->pdev), driver_desc,
		driver_name, DRIVER_VERSION, dmastr(),
		dev->driver ? dev->driver->driver.name : "(none)",
		is_usb_connected
			? ((tmp & PW_PULLUP) ? "full speed" : "powered")
			: "disconnected",
		({char *state;
		switch(dev->ep0state){
		case EP0_DISCONNECT: state = "ep0_disconnect"; break;
		case EP0_IDLE: state = "ep0_idle"; break;
		case EP0_IN: state = "ep0_in"; break;
		case EP0_OUT: state = "ep0_out"; break;
		case EP0_STATUS: state = "ep0_status"; break;
		case EP0_STALL: state = "ep0_stall"; break;
		case EP0_SUSPEND: state = "ep0_suspend"; break;
		default: state = "ep0_?"; break;
		} state; })
		);
	size -= t;
	next += t;

	dump_intmask("int_status", readl(&regs->int_status), &next, &size);
	dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);

	/* no more to report unless connected, pulled up, and driven */
	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
		goto done;

	/* registers for (active) device and ep0 */
	t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
			"single.bcs %02x.%02x state %x addr %u\n",
			dev->irqs, readl(&regs->DataSet),
			readl(&regs->EPxSingle), readl(&regs->EPxBCS),
			readl(&regs->UsbState),
			readl(&regs->address));
	size -= t;
	next += t;

	tmp = readl(&regs->dma_master);
	t = scnprintf(next, size,
		"dma %03X =" EIGHTBITS "%s %s\n", tmp,
		(tmp & MST_EOPB_DIS) ? " eopb-" : "",
		(tmp & MST_EOPB_ENA) ? " eopb+" : "",
		(tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
		(tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",
		(tmp & MST_RD_EOPB) ? " eopb" : "",
		(tmp & MST_RD_RESET) ? " in_reset" : "",
		(tmp & MST_WR_RESET) ? " out_reset" : "",
		(tmp & MST_RD_ENA) ? " IN" : "",
		(tmp & MST_WR_ENA) ? " OUT" : "",
		(tmp & MST_CONNECTION)
			? "ep1in/ep2out"
			: "ep1out/ep2in");
	size -= t;
	next += t;

	/* dump endpoint queues */
	for (i = 0; i < 4; i++) {
		struct goku_ep *ep = &dev->ep[i];
		struct goku_request *req;

		if (i && !ep->desc)
			continue;

		tmp = readl(ep->reg_status);
		t = scnprintf(next, size,
			"%s %s max %u %s, irqs %lu, "
			"status %02x (%s) " FOURBITS "\n",
			ep->ep.name,
			ep->is_in ? "in" : "out",
			ep->ep.maxpacket,
			ep->dma ? "dma" : "pio",
			ep->irqs,
			tmp, ({ char *s;
			switch (tmp & EPxSTATUS_EP_MASK) {
			case EPxSTATUS_EP_READY:
				s = "ready"; break;
			case EPxSTATUS_EP_DATAIN:
				s = "packet"; break;
			case EPxSTATUS_EP_FULL:
				s = "full"; break;
			case EPxSTATUS_EP_TX_ERR:	/* host will retry */
				s = "tx_err"; break;
			case EPxSTATUS_EP_RX_ERR:
				s = "rx_err"; break;
			case EPxSTATUS_EP_BUSY:		/* ep0 only */
				s = "busy"; break;
			case EPxSTATUS_EP_STALL:
				s = "stall"; break;
			case EPxSTATUS_EP_INVALID:	/* these "can't happen" */
				s = "invalid"; break;
			default:
				s = "?"; break;
			}; s; }),
			(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
			(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
			(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
			(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
			);
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			/* for the head of a dma queue, show live progress
			 * taken from the dma address counter
			 */
			if (ep->dma && req->queue.prev == &ep->queue) {
				if (i == UDC_MSTRD_ENDPOINT)
					tmp = readl(&regs->in_dma_current);
				else
					tmp = readl(&regs->out_dma_current);
				tmp -= req->req.dma;
				tmp++;
			} else
				tmp = req->req.actual;

			t = scnprintf(next, size,
				"\treq %p len %u/%u buf %p\n",
				&req->req, tmp, req->req.length,
				req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
		}
	}

done:
	local_irq_restore(flags);
	*eof = 1;
	return count - size;
}
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
/* (re)initialize driver-side state for the gadget and all four
 * endpoints; hardware is untouched here
 */
static void udc_reinit (struct goku_udc *dev)
{
	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };

	unsigned i;

	INIT_LIST_HEAD (&dev->gadget.ep_list);
	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->ep0state = EP0_DISCONNECT;
	dev->irqs = 0;

	for (i = 0; i < 4; i++) {
		struct goku_ep *ep = &dev->ep[i];

		ep->num = i;
		ep->ep.name = names[i];
		ep->reg_fifo = &dev->regs->ep_fifo [i];
		ep->reg_status = &dev->regs->ep_status [i];
		ep->reg_mode = &dev->regs->ep_mode[i];
		ep->ep.ops = &goku_ep_ops;
		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
		ep->dev = dev;
		INIT_LIST_HEAD (&ep->queue);
		ep_reset(NULL, ep);
	}

	/* ep0 is special: no mode register, fixed maxpacket, and it is
	 * the gadget's ep0 rather than a member of the general ep_list
	 */
	dev->ep[0].reg_mode = NULL;
	dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
	list_del_init (&dev->ep[0].ep.ep_list);
}
/*
 * Quiesce the controller: drop the D+ pullup, mask all irqs, then
 * deassert chip reset.  (Fixes mojibake: "&regs" had been corrupted
 * to a registered-sign character.)
 */
static void udc_reset(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem *regs = dev->regs;

	writel(0, &regs->power_detect);
	writel(0, &regs->int_enable);
	readl(&regs->int_enable);	/* flush posted writes */
	dev->int_enable = 0;

	/* deassert reset, leave USB D+ at hi-Z (no pullup)
	 * don't let INT_PWRDETECT sequence begin
	 */
	udelay(250);
	writel(PW_RESETB, &regs->power_detect);
	readl(&regs->int_enable);	/* flush posted writes */
}
/*
 * Reset and reinitialize everything, program the hardware's automatic
 * request handling, then enable the D+ pullup and ep0 irqs so the host
 * can start enumeration.  (Fixes mojibake: "&regs" had been corrupted
 * to a registered-sign character.)
 */
static void ep0_start(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem *regs = dev->regs;
	unsigned i;

	VDBG(dev, "%s\n", __func__);

	udc_reset(dev);
	udc_reinit (dev);
	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);

	/* hw handles set_address, set_feature, get_status; maybe more */
	writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
		| G_REQMODE_GET_DESC
		| G_REQMODE_CLEAR_FEAT
		, &regs->reqmode);

	for (i = 0; i < 4; i++)
		dev->ep[i].irqs = 0;

	/* can't modify descriptors after writing UsbReady */
	for (i = 0; i < DESC_LEN; i++)
		writel(0, &regs->descriptors[i]);
	writel(0, &regs->UsbReady);

	/* expect ep0 requests when the host drops reset */
	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
	dev->int_enable = INT_DEVWIDE | INT_EP0;
	writel(dev->int_enable, &dev->regs->int_enable);
	readl(&regs->int_enable);	/* flush posted writes */
	dev->gadget.speed = USB_SPEED_FULL;
	dev->ep0state = EP0_IDLE;
}
/* Bring the device up: begin enumeration immediately when bus power is
 * already detected, otherwise arm the power-detect irq and wait.
 */
static void udc_enable(struct goku_udc *dev)
{
	if (readl(&dev->regs->power_detect) & PW_DETECT) {
		ep0_start(dev);
		return;
	}

	DBG(dev, "%s\n", __func__);
	dev->int_enable = INT_PWRDETECT;
	writel(dev->int_enable, &dev->regs->int_enable);
}
/*-------------------------------------------------------------------------*/
/* keeping it simple:
* - one bus driver, initted first;
* - one function driver, initted second
*/
static struct goku_udc *the_controller;
/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
static int goku_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
{
	struct goku_udc *dev = the_controller;
	int retval;

	/* require a full-speed driver with the mandatory callbacks */
	if (!driver
			|| driver->max_speed < USB_SPEED_FULL
			|| !bind
			|| !driver->disconnect
			|| !driver->setup)
		return -EINVAL;
	if (!dev)
		return -ENODEV;
	if (dev->driver)
		return -EBUSY;

	/* hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;
	retval = bind(&dev->gadget);
	if (retval) {
		DBG(dev, "bind to driver %s --> error %d\n",
				driver->driver.name, retval);
		/* undo the hookup on bind failure */
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}

	/* then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	udc_enable(dev);

	DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
	return 0;
}
/* Quiesce the hardware, flush every endpoint queue, and report the
 * disconnect to the gadget driver.  Callers hold dev->lock; it is
 * dropped around the driver's disconnect() callback.
 */
static void
stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
{
	unsigned i;

	DBG (dev, "%s\n", __func__);

	/* never connected: there is no disconnect to report */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* disconnect gadget driver after quiesceing hw and the driver */
	udc_reset (dev);
	for (i = 0; i < 4; i++)
		nuke(&dev->ep [i], -ESHUTDOWN);
	if (driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	/* still bound to a driver?  re-arm for the next connection */
	if (dev->driver)
		udc_enable(dev);
}
/* Unregister the gadget driver: detach it from the controller under
 * the lock, stop all activity, then call its unbind().
 */
static int goku_stop(struct usb_gadget_driver *driver)
{
	struct goku_udc *dev = the_controller;
	unsigned long flags;

	if (!dev)
		return -ENODEV;
	if (!driver || driver != dev->driver || !driver->unbind)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	dev->driver = NULL;
	stop_activity(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;

	DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
	return 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Handle a SETUP packet on ep0: assemble the control request from the
 * per-byte hardware registers, handle CLEAR_FEATURE(ENDPOINT_HALT) and
 * CLEAR_FEATURE(remote wakeup) locally, and delegate everything else
 * to the gadget driver's setup() callback.  Runs from the irq handler
 * with dev->lock held; the lock is dropped around setup().
 * (Fixes mojibake: "&regs" had been corrupted to a registered-sign
 * character.)
 */
static void ep0_setup(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem *regs = dev->regs;
	struct usb_ctrlrequest ctrl;
	int tmp;

	/* read SETUP packet and enter DATA stage */
	ctrl.bRequestType = readl(&regs->bRequestType);
	ctrl.bRequest = readl(&regs->bRequest);
	ctrl.wValue = cpu_to_le16((readl(&regs->wValueH) << 8)
					| readl(&regs->wValueL));
	ctrl.wIndex = cpu_to_le16((readl(&regs->wIndexH) << 8)
					| readl(&regs->wIndexL));
	ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
					| readl(&regs->wLengthL));
	writel(0, &regs->SetupRecv);

	nuke(&dev->ep[0], 0);
	dev->ep[0].stopped = 0;
	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
		dev->ep[0].is_in = 1;
		dev->ep0state = EP0_IN;
		/* detect early status stages */
		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
	} else {
		dev->ep[0].is_in = 0;
		dev->ep0state = EP0_OUT;

		/* NOTE:  CLEAR_FEATURE is done in software so that we can
		 * synchronize transfer restarts after bulk IN stalls.  data
		 * won't even enter the fifo until the halt is cleared.
		 */
		switch (ctrl.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			switch (ctrl.bRequestType) {
			case USB_RECIP_ENDPOINT:
				tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
				/* active endpoint */
				if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
					goto stall;
				/* direction bit must match the endpoint's */
				if (ctrl.wIndex & cpu_to_le16(
						USB_DIR_IN)) {
					if (!dev->ep[tmp].is_in)
						goto stall;
				} else {
					if (dev->ep[tmp].is_in)
						goto stall;
				}
				if (ctrl.wValue != cpu_to_le16(
						USB_ENDPOINT_HALT))
					goto stall;
				if (tmp)
					goku_clear_halt(&dev->ep[tmp]);
succeed:
				/* start ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				dev->ep[0].stopped = 1;
				dev->ep0state = EP0_STATUS;
				return;
			case USB_RECIP_DEVICE:
				/* device remote wakeup: always clear */
				if (ctrl.wValue != cpu_to_le16(1))
					goto stall;
				VDBG(dev, "clear dev remote wakeup\n");
				goto succeed;
			case USB_RECIP_INTERFACE:
				goto stall;
			default:		/* pass to gadget driver */
				break;
			}
			break;
		default:
			break;
		}
	}

#ifdef USB_TRACE
	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		ctrl.bRequestType, ctrl.bRequest,
		le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
		le16_to_cpu(ctrl.wLength));
#endif

	/* hw wants to know when we're configured (or not) */
	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
				&& ctrl.bRequestType == USB_RECIP_DEVICE);
	if (unlikely(dev->req_config))
		dev->configured = (ctrl.wValue != cpu_to_le16(0));

	/* delegate everything to the gadget driver.
	 * it may respond after this irq handler returns.
	 */
	spin_unlock (&dev->lock);
	tmp = dev->driver->setup(&dev->gadget, &ctrl);
	spin_lock (&dev->lock);
	if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
			ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
		command(regs, COMMAND_STALL, 0);
		dev->ep[0].stopped = 1;
		dev->ep0state = EP0_STALL;
	}

	/* expect at least one data or status stage irq */
}
/* acknowledge irq bit(s): clear them in the hardware status register
 * and in the local 'stat' copy, and record that something was handled.
 * Relies on 'stat', 'regs', and 'handled' in the expanding scope.
 * (Fixes mojibake: "&regs" had been corrupted to a registered-sign
 * character.)
 */
#define ACK(irqbit) { \
	stat &= ~irqbit; \
	writel(~irqbit, &regs->int_status); \
	handled = 1; \
	}
static irqreturn_t goku_irq(int irq, void *_dev)
{
struct goku_udc *dev = _dev;
struct goku_udc_regs __iomem *regs = dev->regs;
struct goku_ep *ep;
u32 stat, handled = 0;
unsigned i, rescans = 5;
spin_lock(&dev->lock);
rescan:
stat = readl(®s->int_status) & dev->int_enable;
if (!stat)
goto done;
dev->irqs++;
/* device-wide irqs */
if (unlikely(stat & INT_DEVWIDE)) {
if (stat & INT_SYSERROR) {
ERROR(dev, "system error\n");
stop_activity(dev, dev->driver);
stat = 0;
handled = 1;
// FIXME have a neater way to prevent re-enumeration
dev->driver = NULL;
goto done;
}
if (stat & INT_PWRDETECT) {
writel(~stat, ®s->int_status);
if (readl(&dev->regs->power_detect) & PW_DETECT) {
VDBG(dev, "connect\n");
ep0_start(dev);
} else {
DBG(dev, "disconnect\n");
if (dev->gadget.speed == USB_SPEED_FULL)
stop_activity(dev, dev->driver);
dev->ep0state = EP0_DISCONNECT;
dev->int_enable = INT_DEVWIDE;
writel(dev->int_enable, &dev->regs->int_enable);
}
stat = 0;
handled = 1;
goto done;
}
if (stat & INT_SUSPEND) {
ACK(INT_SUSPEND);
if (readl(®s->ep_status[0]) & EPxSTATUS_SUSPEND) {
switch (dev->ep0state) {
case EP0_DISCONNECT:
case EP0_SUSPEND:
goto pm_next;
default:
break;
}
DBG(dev, "USB suspend\n");
dev->ep0state = EP0_SUSPEND;
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->suspend) {
spin_unlock(&dev->lock);
dev->driver->suspend(&dev->gadget);
spin_lock(&dev->lock);
}
} else {
if (dev->ep0state != EP0_SUSPEND) {
DBG(dev, "bogus USB resume %d\n",
dev->ep0state);
goto pm_next;
}
DBG(dev, "USB resume\n");
dev->ep0state = EP0_IDLE;
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->resume) {
spin_unlock(&dev->lock);
dev->driver->resume(&dev->gadget);
spin_lock(&dev->lock);
}
}
}
pm_next:
if (stat & INT_USBRESET) { /* hub reset done */
ACK(INT_USBRESET);
INFO(dev, "USB reset done, gadget %s\n",
dev->driver->driver.name);
}
// and INT_ERR on some endpoint's crc/bitstuff/... problem
}
/* progress ep0 setup, data, or status stages.
* no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
*/
if (stat & INT_SETUP) {
ACK(INT_SETUP);
dev->ep[0].irqs++;
ep0_setup(dev);
}
if (stat & INT_STATUSNAK) {
ACK(INT_STATUSNAK|INT_ENDPOINT0);
if (dev->ep0state == EP0_IN) {
ep = &dev->ep[0];
ep->irqs++;
nuke(ep, 0);
writel(~(1<<0), ®s->EOP);
dev->ep0state = EP0_STATUS;
}
}
if (stat & INT_ENDPOINT0) {
ACK(INT_ENDPOINT0);
ep = &dev->ep[0];
ep->irqs++;
pio_advance(ep);
}
/* dma completion */
if (stat & INT_MSTRDEND) { /* IN */
ACK(INT_MSTRDEND);
ep = &dev->ep[UDC_MSTRD_ENDPOINT];
ep->irqs++;
dma_advance(dev, ep);
}
if (stat & INT_MSTWREND) { /* OUT */
ACK(INT_MSTWREND);
ep = &dev->ep[UDC_MSTWR_ENDPOINT];
ep->irqs++;
dma_advance(dev, ep);
}
if (stat & INT_MSTWRTMOUT) { /* OUT */
ACK(INT_MSTWRTMOUT);
ep = &dev->ep[UDC_MSTWR_ENDPOINT];
ep->irqs++;
ERROR(dev, "%s write timeout ?\n", ep->ep.name);
// reset dma? then dma_advance()
}
/* pio */
for (i = 1; i < 4; i++) {
u32 tmp = INT_EPxDATASET(i);
if (!(stat & tmp))
continue;
ep = &dev->ep[i];
pio_advance(ep);
if (list_empty (&ep->queue))
pio_irq_disable(dev, regs, i);
stat &= ~tmp;
handled = 1;
ep->irqs++;
}
if (rescans--)
goto rescan;
done:
(void)readl(®s->int_enable);
spin_unlock(&dev->lock);
if (stat)
DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
readl(®s->int_status), dev->int_enable);
return IRQ_RETVAL(handled);
}
#undef ACK
/*-------------------------------------------------------------------------*/
/* release() for the gadget's embedded struct device: frees the whole
 * goku_udc once the driver core drops the last device reference
 */
static void gadget_release(struct device *_dev)
{
	struct goku_udc *dev = dev_get_drvdata(_dev);

	kfree(dev);
}
/* tear down the binding between this driver and the pci device */
static void goku_remove(struct pci_dev *pdev)
{
	struct goku_udc *dev = pci_get_drvdata(pdev);

	/* defensive: the probe error path may run before drvdata is set */
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	usb_del_gadget_udc(&dev->gadget);

	BUG_ON(dev->driver);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	remove_proc_entry(proc_node_name, NULL);
#endif
	if (dev->regs)
		udc_reset(dev);
	if (dev->got_irq)
		free_irq(pdev->irq, dev);
	if (dev->regs)
		iounmap(dev->regs);
	if (dev->got_region)
		release_mem_region(pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);
	dev->regs = NULL;
	the_controller = NULL;
	INFO(dev, "unbind\n");

	/* device_unregister() may drop the last reference, invoking
	 * gadget_release() which kfree()s dev -- so it must come after
	 * every other use of dev.  The original touched dev->regs and
	 * logged via dev AFTER unregistering: a use-after-free.
	 */
	if (dev->registered)
		device_unregister(&dev->gadget.dev);
}
/* wrap this driver around the specified pci device, but
 * don't respond over USB until a gadget driver binds to us.
 */
static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct goku_udc *dev = NULL;
	unsigned long resource, len;
	void __iomem *base = NULL;
	int retval;

	/* if you want to support more than one controller in a system,
	 * usb_gadget_driver_{register,unregister}() must change.
	 */
	if (the_controller) {
		pr_warning("ignoring %s\n", pci_name(pdev));
		return -EBUSY;
	}
	if (!pdev->irq) {
		printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
		retval = -ENODEV;
		goto err;
	}

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (dev == NULL){
		pr_debug("enomem %s\n", pci_name(pdev));
		retval = -ENOMEM;
		goto err;
	}

	spin_lock_init(&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &goku_ops;
	dev->gadget.max_speed = USB_SPEED_FULL;

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.dev.parent = &pdev->dev;
	dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
	dev->gadget.dev.release = gadget_release;
	dev->gadget.name = driver_name;

	/* set drvdata before anything can fail: the error path calls
	 * goku_remove(), which looks dev up via pci_get_drvdata() and
	 * previously dereferenced NULL when pci_enable_device(),
	 * request_mem_region() or ioremap_nocache() failed
	 */
	pci_set_drvdata(pdev, dev);

	/* now all the pci goodies ... */
	retval = pci_enable_device(pdev);
	if (retval < 0) {
		DBG(dev, "can't enable, %d\n", retval);
		goto err;
	}
	dev->enabled = 1;

	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	if (!request_mem_region(resource, len, driver_name)) {
		DBG(dev, "controller already in use\n");
		retval = -EBUSY;
		goto err;
	}
	dev->got_region = 1;

	base = ioremap_nocache(resource, len);
	if (base == NULL) {
		DBG(dev, "can't map memory\n");
		retval = -EFAULT;
		goto err;
	}
	dev->regs = (struct goku_udc_regs __iomem *) base;

	INFO(dev, "%s\n", driver_desc);
	INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
	INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);

	/* init to known state, then setup irqs */
	udc_reset(dev);
	udc_reinit (dev);
	if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
			driver_name, dev) != 0) {
		DBG(dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto err;
	}
	dev->got_irq = 1;
	if (use_dma)
		pci_set_master(pdev);

#ifdef CONFIG_USB_GADGET_DEBUG_FILES
	create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif

	the_controller = dev;
	retval = device_register(&dev->gadget.dev);
	if (retval) {
		put_device(&dev->gadget.dev);
		goto err;
	}
	dev->registered = 1;
	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (retval)
		goto err;

	return 0;

err:
	/* goku_remove() unwinds whatever the enabled/got_region/got_irq/
	 * registered flags say was set up
	 */
	if (dev)
		goku_remove (pdev);
	return retval;
}
/*-------------------------------------------------------------------------*/
/* match only the Toshiba UDC this driver supports */
static const struct pci_device_id pci_ids[] = { {
	.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = ~0,
	.vendor = 0x102f, /* Toshiba */
	.device = 0x0107, /* this UDC */
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,

}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
/* pci glue: bind goku_probe/goku_remove to matching devices */
static struct pci_driver goku_pci_driver = {
	.name = (char *) driver_name,
	.id_table = pci_ids,

	.probe = goku_probe,
	.remove = goku_remove,

	/* FIXME add power management support */
};
/* module load: register with the PCI core */
static int __init init (void)
{
	return pci_register_driver (&goku_pci_driver);
}
module_init (init);

/* module unload: unregister, detaching any bound device */
static void __exit cleanup (void)
{
	pci_unregister_driver (&goku_pci_driver);
}
module_exit (cleanup);
| gpl-2.0 |
tjstyle/android_kernel_samsung_msm | drivers/misc/ibmasm/uart.c | 5019 | 2063 |
/*
* IBM ASM Service Processor Device Driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <amax@us.ibm.com>
*
*/
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include "ibmasm.h"
#include "lowlevel.h"
/*
 * Register COM B of the service processor as an 8250 serial port,
 * unless the UART scratch register says the service processor owns
 * the UART itself.  On success sp->serial_line holds the 8250 line
 * number and UART interrupts are enabled; otherwise it is set to (or
 * stays) negative.
 */
void ibmasm_register_uart(struct service_processor *sp)
{
	struct uart_port uport;
	void __iomem *iomem_base;

	iomem_base = sp->base_address + SCOUT_COM_B_BASE;

	/* read the uart scratch register to determine if the UART
	 * is dedicated to the service processor or if the OS can use it
	 */
	if (0 == readl(iomem_base + UART_SCR)) {
		dev_info(sp->dev, "IBM SP UART not registered, owned by service processor\n");
		sp->serial_line = -1;
		return;
	}

	memset(&uport, 0, sizeof(struct uart_port));
	uport.irq = sp->irq;
	uport.uartclk = 3686400;	/* fixed clock for this UART */
	uport.flags = UPF_SHARE_IRQ;
	uport.iotype = UPIO_MEM;
	uport.membase = iomem_base;

	sp->serial_line = serial8250_register_port(&uport);
	if (sp->serial_line < 0) {
		dev_err(sp->dev, "Failed to register serial port\n");
		return;
	}
	enable_uart_interrupts(sp->base_address);
}
/* Tear down the 8250 port set up by ibmasm_register_uart(), if any. */
void ibmasm_unregister_uart(struct service_processor *sp)
{
	if (sp->serial_line >= 0) {
		disable_uart_interrupts(sp->base_address);
		serial8250_unregister_port(sp->serial_line);
	}
}
| gpl-2.0 |
wolverine2k/android_kernel_oppo_n1 | fs/notify/group.c | 5275 | 3065 | /*
* Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/wait.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
#include <linux/atomic.h>
/*
* Final freeing of a group
*/
void fsnotify_final_destroy_group(struct fsnotify_group *group)
{
/* clear the notification queue of all events */
fsnotify_flush_notify(group);
if (group->ops->free_group_priv)
group->ops->free_group_priv(group);
kfree(group);
}
/*
 * Trying to get rid of a group. We need to first get rid of any outstanding
 * allocations and then free the group. Remember that fsnotify_clear_marks_by_group
 * could miss marks that are being freed by inode and those marks could still
 * hold a reference to this group (via group->num_marks)  If we get into that
 * situation, fsnotify_final_destroy_group will get called when that final
 * mark is freed.
 */
static void fsnotify_destroy_group(struct fsnotify_group *group)
{
	/* clear all inode marks for this group */
	fsnotify_clear_marks_by_group(group);

	/* wait for outstanding SRCU readers before tearing down state */
	synchronize_srcu(&fsnotify_mark_srcu);

	/* past the point of no return, matches the initial value of 1 */
	if (atomic_dec_and_test(&group->num_marks))
		fsnotify_final_destroy_group(group);
}
/*
 * Drop a reference to a group.  Tear the group down once the last
 * reference is gone.
 */
void fsnotify_put_group(struct fsnotify_group *group)
{
	if (!atomic_dec_and_test(&group->refcnt))
		return;

	fsnotify_destroy_group(group);
}
/*
 * Create a new fsnotify_group and hold a reference for the group returned.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
{
	struct fsnotify_group *group;

	group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	/* drops to 0 when there are no external references to this group */
	atomic_set(&group->refcnt, 1);
	/*
	 * hits 0 when there are no external references AND no marks for
	 * this group
	 */
	atomic_set(&group->num_marks, 1);

	mutex_init(&group->notification_mutex);
	INIT_LIST_HEAD(&group->notification_list);
	init_waitqueue_head(&group->notification_waitq);
	group->max_events = UINT_MAX;	/* unbounded queue by default */

	spin_lock_init(&group->mark_lock);
	INIT_LIST_HEAD(&group->marks_list);

	group->ops = ops;

	return group;
}
| gpl-2.0 |
thestealth131205/HTC_Desire_510_64bit | sound/soc/pxa/saarb.c | 7323 | 5257 | /*
* saarb.c -- SoC audio for saarb
*
* Copyright (C) 2010 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/jack.h>
#include <asm/mach-types.h>
#include "../codecs/88pm860x-codec.h"
#include "pxa-ssp.h"
static int saarb_pm860x_init(struct snd_soc_pcm_runtime *rtd);
static struct platform_device *saarb_snd_device;
static struct snd_soc_jack hs_jack, mic_jack;
/* jack pin reported for headset (headphone) insertion */
static struct snd_soc_jack_pin hs_jack_pins[] = {
	{ .pin = "Headset Stereophone", .mask = SND_JACK_HEADPHONE, },
};

/* jack pin reported for headset microphone insertion */
static struct snd_soc_jack_pin mic_jack_pins[] = {
	{ .pin = "Headset Mic 2", .mask = SND_JACK_MICROPHONE, },
};
/* saarb machine dapm widgets
 *
 * Widget names must match the sinks/sources named in saarb_audio_map
 * and the jack pins above.  "Headset Stereophone" and "Headset Mic 2"
 * were previously misspelled ("Headphone Stereophone", "Headset Mic"),
 * leaving those routes and jack pins pointing at nonexistent widgets.
 */
static const struct snd_soc_dapm_widget saarb_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
	SND_SOC_DAPM_LINE("Lineout Out 1", NULL),
	SND_SOC_DAPM_LINE("Lineout Out 2", NULL),
	SND_SOC_DAPM_SPK("Ext Speaker", NULL),
	SND_SOC_DAPM_MIC("Ext Mic 1", NULL),
	SND_SOC_DAPM_MIC("Headset Mic 2", NULL),
	SND_SOC_DAPM_MIC("Ext Mic 3", NULL),
};
/* saarb machine audio map: route board widgets to codec pins */
static const struct snd_soc_dapm_route saarb_audio_map[] = {
	/* headset and speaker outputs */
	{"Headset Stereophone", NULL, "HS1"},
	{"Headset Stereophone", NULL, "HS2"},

	{"Ext Speaker", NULL, "LSP"},
	{"Ext Speaker", NULL, "LSN"},

	{"Lineout Out 1", NULL, "LINEOUT1"},
	{"Lineout Out 2", NULL, "LINEOUT2"},

	/* mic inputs, biased from Mic1/Mic3 bias supplies */
	{"MIC1P", NULL, "Mic1 Bias"},
	{"MIC1N", NULL, "Mic1 Bias"},
	{"Mic1 Bias", NULL, "Ext Mic 1"},

	{"MIC2P", NULL, "Mic1 Bias"},
	{"MIC2N", NULL, "Mic1 Bias"},
	{"Mic1 Bias", NULL, "Headset Mic 2"},

	{"MIC3P", NULL, "Mic3 Bias"},
	{"MIC3N", NULL, "Mic3 Bias"},
	{"Mic3 Bias", NULL, "Ext Mic 3"},
};
/*
 * hw_params hook for the 88PM860x I2S link.
 *
 * Sources the SSP clock from the network PLL, configures the codec
 * sysclk direction, and claims TDM slots 0 and 1 (mask 0x3) with a
 * slot width equal to the physical sample width.  Returns 0 or a
 * negative errno from the first failing DAI call.
 */
static int saarb_i2s_hw_params(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec = rtd->codec_dai;
	struct snd_soc_dai *cpu = rtd->cpu_dai;
	int slot_width = snd_pcm_format_physical_width(params_format(params));
	int err;

	err = snd_soc_dai_set_sysclk(cpu, PXA_SSP_CLK_NET_PLL, 0,
				     PM860X_CLK_DIR_OUT);
	if (err < 0)
		return err;

	err = snd_soc_dai_set_sysclk(codec, 0, 0, PM860X_CLK_DIR_OUT);
	if (err < 0)
		return err;

	return snd_soc_dai_set_tdm_slot(cpu, 3, 3, 2, slot_width);
}
/* PCM ops for the I2S link: only hw_params needs board-specific handling. */
static struct snd_soc_ops saarb_i2s_ops = {
.hw_params = saarb_i2s_hw_params,
};
/* Single DAI link: PXA SSP1 <-> 88PM860x I2S, codec as bit/frame master. */
static struct snd_soc_dai_link saarb_dai[] = {
{
.name = "88PM860x I2S",
.stream_name = "I2S Audio",
.cpu_dai_name = "pxa-ssp-dai.1",
.codec_dai_name = "88pm860x-i2s",
.platform_name = "pxa-pcm-audio",
.codec_name = "88pm860x-codec",
.init = saarb_pm860x_init,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM,
.ops = &saarb_i2s_ops,
},
};
/* The machine-level sound card tying the link, widgets and routes together. */
static struct snd_soc_card snd_soc_card_saarb = {
.name = "Saarb",
.owner = THIS_MODULE,
.dai_link = saarb_dai,
.num_links = ARRAY_SIZE(saarb_dai),
.dapm_widgets = saarb_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(saarb_dapm_widgets),
.dapm_routes = saarb_audio_map,
.num_dapm_routes = ARRAY_SIZE(saarb_audio_map),
};
/*
 * Machine-level init for the 88PM860x link: set the initial DAPM pin
 * states, create the headphone and microphone jacks, and hand them to
 * the codec driver's hardware jack-detect logic.  Always returns 0.
 */
static int saarb_pm860x_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
/* connected pins */
snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
/*
 * NOTE(review): "Headset Mic 2" and "Headset Stereophone" do not match
 * any widget declared in saarb_dapm_widgets[] ("Headset Mic" /
 * "Headphone Stereophone") -- these disables are presumably no-ops;
 * confirm the intended names.
 */
snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
/* Headset jack detection */
snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE
| SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2,
&hs_jack);
snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
hs_jack_pins);
snd_soc_jack_new(codec, "Microphone Jack", SND_JACK_MICROPHONE,
&mic_jack);
snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
mic_jack_pins);
/* headphone, microphone detection & headset short detection */
pm860x_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADPHONE,
SND_JACK_BTN_0, SND_JACK_BTN_1, SND_JACK_BTN_2);
/* NOTE(review): mic detect reports into hs_jack, not mic_jack -- verify. */
pm860x_mic_jack_detect(codec, &hs_jack, SND_JACK_MICROPHONE);
return 0;
}
/*
 * Module init: bail out unless running on a Saarb board, then allocate
 * and register the "soc-audio" platform device carrying the card.
 * On registration failure the device reference is dropped again.
 */
static int __init saarb_init(void)
{
	int err;

	if (!machine_is_saarb())
		return -ENODEV;

	saarb_snd_device = platform_device_alloc("soc-audio", -1);
	if (saarb_snd_device == NULL)
		return -ENOMEM;

	platform_set_drvdata(saarb_snd_device, &snd_soc_card_saarb);

	err = platform_device_add(saarb_snd_device);
	if (err != 0)
		platform_device_put(saarb_snd_device);

	return err;
}
/* Module exit: unregister (and thereby release) the sound platform device. */
static void __exit saarb_exit(void)
{
platform_device_unregister(saarb_snd_device);
}
module_init(saarb_init);
module_exit(saarb_exit);
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_DESCRIPTION("ALSA SoC 88PM860x Saarb");
MODULE_LICENSE("GPL");
| gpl-2.0 |
golden-guy/android_kernel_samsung_golden | drivers/staging/comedi/drivers/vmk80xx.c | 7835 | 34095 | /*
comedi/drivers/vmk80xx.c
Velleman USB Board Low-Level Driver
Copyright (C) 2009 Manuel Gebele <forensixs@gmx.de>, Germany
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: vmk80xx
Description: Velleman USB Board Low-Level Driver
Devices: K8055/K8061 aka VM110/VM140
Author: Manuel Gebele <forensixs@gmx.de>
Updated: Sun, 10 May 2009 11:14:59 +0200
Status: works
Supports:
- analog input
- analog output
- digital input
- digital output
- counter
- pwm
*/
/*
Changelog:
0.8.81 -3- code completely rewritten (adjust driver logic)
0.8.81 -2- full support for K8061
0.8.81 -1- fix some mistaken among others the number of
supported boards and I/O handling
0.7.76 -4- renamed to vmk80xx
0.7.76 -3- detect K8061 (only theoretically supported)
0.7.76 -2- code completely rewritten (adjust driver logic)
0.7.76 -1- support for digital and counter subdevice
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/usb.h>
#include <linux/uaccess.h>
#include "../comedidev.h"
#define BOARDNAME "vmk80xx"
MODULE_AUTHOR("Manuel Gebele <forensixs@gmx.de>");
MODULE_DESCRIPTION("Velleman USB Board Low-Level Driver");
MODULE_SUPPORTED_DEVICE("K8055/K8061 aka VM110/VM140");
MODULE_VERSION("0.8.01");
MODULE_LICENSE("GPL");
enum {
DEVICE_VMK8055,
DEVICE_VMK8061
};
/*
 * Velleman vendor ID 0x10cf; PIDs 0x5500-0x5503 are K8055 address
 * variants (board jumper SK5/SK6), PIDs 0x8061-0x8068 are K8061
 * address variants.
 */
static const struct usb_device_id vmk80xx_id_table[] = {
{USB_DEVICE(0x10cf, 0x5500), .driver_info = DEVICE_VMK8055},
{USB_DEVICE(0x10cf, 0x5501), .driver_info = DEVICE_VMK8055},
{USB_DEVICE(0x10cf, 0x5502), .driver_info = DEVICE_VMK8055},
{USB_DEVICE(0x10cf, 0x5503), .driver_info = DEVICE_VMK8055},
{USB_DEVICE(0x10cf, 0x8061), .driver_info = DEVICE_VMK8061},
{USB_DEVICE(0x10cf, 0x8062), .driver_info = DEVICE_VMK8061},
{USB_DEVICE(0x10cf, 0x8063), .driver_info = DEVICE_VMK8061},
{USB_DEVICE(0x10cf, 0x8064), .driver_info = DEVICE_VMK8061},
{USB_DEVICE(0x10cf, 0x8065), .driver_info = DEVICE_VMK8061},
{USB_DEVICE(0x10cf, 0x8066), .driver_info = DEVICE_VMK8061},
{USB_DEVICE(0x10cf, 0x8067), .driver_info = DEVICE_VMK8061},
{USB_DEVICE(0x10cf, 0x8068), .driver_info = DEVICE_VMK8061},
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, vmk80xx_id_table);
#define VMK8055_DI_REG 0x00
#define VMK8055_DO_REG 0x01
#define VMK8055_AO1_REG 0x02
#define VMK8055_AO2_REG 0x03
#define VMK8055_AI1_REG 0x02
#define VMK8055_AI2_REG 0x03
#define VMK8055_CNT1_REG 0x04
#define VMK8055_CNT2_REG 0x06
#define VMK8061_CH_REG 0x01
#define VMK8061_DI_REG 0x01
#define VMK8061_DO_REG 0x01
#define VMK8061_PWM_REG1 0x01
#define VMK8061_PWM_REG2 0x02
#define VMK8061_CNT_REG 0x02
#define VMK8061_AO_REG 0x02
#define VMK8061_AI_REG1 0x02
#define VMK8061_AI_REG2 0x03
#define VMK8055_CMD_RST 0x00
#define VMK8055_CMD_DEB1_TIME 0x01
#define VMK8055_CMD_DEB2_TIME 0x02
#define VMK8055_CMD_RST_CNT1 0x03
#define VMK8055_CMD_RST_CNT2 0x04
#define VMK8055_CMD_WRT_AD 0x05
#define VMK8061_CMD_RD_AI 0x00
#define VMK8061_CMR_RD_ALL_AI 0x01 /* !non-active! */
#define VMK8061_CMD_SET_AO 0x02
#define VMK8061_CMD_SET_ALL_AO 0x03 /* !non-active! */
#define VMK8061_CMD_OUT_PWM 0x04
#define VMK8061_CMD_RD_DI 0x05
#define VMK8061_CMD_DO 0x06 /* !non-active! */
#define VMK8061_CMD_CLR_DO 0x07
#define VMK8061_CMD_SET_DO 0x08
#define VMK8061_CMD_RD_CNT 0x09 /* TODO: completely pointless? */
#define VMK8061_CMD_RST_CNT 0x0a /* TODO: completely pointless? */
#define VMK8061_CMD_RD_VERSION 0x0b /* internal usage */
#define VMK8061_CMD_RD_JMP_STAT 0x0c /* TODO: not implemented yet */
#define VMK8061_CMD_RD_PWR_STAT 0x0d /* internal usage */
#define VMK8061_CMD_RD_DO 0x0e
#define VMK8061_CMD_RD_AO 0x0f
#define VMK8061_CMD_RD_PWM 0x10
#define VMK80XX_MAX_BOARDS COMEDI_NUM_BOARD_MINORS
#define TRANS_OUT_BUSY 1
#define TRANS_IN_BUSY 2
#define TRANS_IN_RUNNING 3
#define IC3_VERSION (1 << 0)
#define IC6_VERSION (1 << 1)
#define URB_RCV_FLAG (1 << 0)
#define URB_SND_FLAG (1 << 1)
#define CONFIG_VMK80XX_DEBUG
#undef CONFIG_VMK80XX_DEBUG
#ifdef CONFIG_VMK80XX_DEBUG
static int dbgvm = 1;
#else
static int dbgvm;
#endif
#ifdef CONFIG_COMEDI_DEBUG
static int dbgcm = 1;
#else
static int dbgcm;
#endif
#define dbgvm(fmt, arg...) \
do { \
if (dbgvm) \
printk(KERN_DEBUG fmt, ##arg); \
} while (0)
#define dbgcm(fmt, arg...) \
do { \
if (dbgcm) \
printk(KERN_DEBUG fmt, ##arg); \
} while (0)
/* Board family: determines register layout and transfer style
 * (K8055: interrupt transfers, K8061: bulk transfers). */
enum vmk80xx_model {
VMK8055_MODEL,
VMK8061_MODEL
};
/* Firmware version strings read from EEPROM (K8061 only). */
struct firmware_version {
unsigned char ic3_vers[32]; /* USB-Controller */
unsigned char ic6_vers[32]; /* CPU */
};
/* Analog ranges: K8055 has a single 0-5V range, K8061 adds 0-10V. */
static const struct comedi_lrange vmk8055_range = {
1, {UNI_RANGE(5)}
};
static const struct comedi_lrange vmk8061_range = {
2, {UNI_RANGE(5), UNI_RANGE(10)}
};
/* Per-model capability description filled in at probe time. */
struct vmk80xx_board {
const char *name;
enum vmk80xx_model model;
const struct comedi_lrange *range;
__u8 ai_chans;
__le16 ai_bits;
__u8 ao_chans;
__le16 ao_bits;
__u8 di_chans;
__le16 di_bits;
__u8 do_chans;
__le16 do_bits;
__u8 cnt_chans;
__le16 cnt_bits;
__u8 pwm_chans;
__le16 pwm_bits;
};
/* Comedi subdevice indices as laid out in vmk80xx_attach(). */
enum {
VMK80XX_SUBD_AI,
VMK80XX_SUBD_AO,
VMK80XX_SUBD_DI,
VMK80XX_SUBD_DO,
VMK80XX_SUBD_CNT,
VMK80XX_SUBD_PWM,
};
/* Per-board runtime state; one slot per attached Velleman device. */
struct vmk80xx_usb {
struct usb_device *udev;
struct usb_interface *intf;
struct usb_endpoint_descriptor *ep_rx;
struct usb_endpoint_descriptor *ep_tx;
struct usb_anchor rx_anchor;
struct usb_anchor tx_anchor;
struct vmk80xx_board board;
struct firmware_version fw;
struct semaphore limit_sem;	/* serializes insn handlers (count 8) */
wait_queue_head_t read_wait;
wait_queue_head_t write_wait;
unsigned char *usb_rx_buf;	/* one wMaxPacketSize receive buffer */
unsigned char *usb_tx_buf;	/* one wMaxPacketSize transmit buffer */
unsigned long flags;		/* TRANS_* bits */
int probed;
int attached;
int count;			/* index into vmb[] */
};
/* Static board table and the mutex guarding probe/attach slot selection. */
static struct vmk80xx_usb vmb[VMK80XX_MAX_BOARDS];
static DEFINE_MUTEX(glb_mutex);
/*
 * Completion handler for outgoing interrupt URBs: log unexpected
 * status codes, then release TRANS_OUT_BUSY and wake any writer
 * sleeping in vmk80xx_write_packet().
 */
static void vmk80xx_tx_callback(struct urb *urb)
{
	struct vmk80xx_usb *dev = urb->context;
	int status = urb->status;

	dbgvm("vmk80xx: %s\n", __func__);

	switch (status) {
	case 0:
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* normal completion or expected unlink/shutdown: silent */
		break;
	default:
		dbgcm("comedi#: vmk80xx: %s - nonzero urb status (%d)\n",
		      __func__, status);
		break;
	}

	if (test_bit(TRANS_OUT_BUSY, &dev->flags)) {
		clear_bit(TRANS_OUT_BUSY, &dev->flags);
		wake_up_interruptible(&dev->write_wait);
	}
}
/*
 * Completion handler for incoming interrupt URBs.  On an unexpected
 * error status the URB is resubmitted (while TRANS_IN_RUNNING is set
 * and the interface is alive); in every exit path TRANS_IN_BUSY is
 * cleared and sleeping readers are woken.
 */
static void vmk80xx_rx_callback(struct urb *urb)
{
struct vmk80xx_usb *dev = urb->context;
int stat = urb->status;
dbgvm("vmk80xx: %s\n", __func__);
switch (stat) {
case 0:
break;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
/* URB unlinked or device going away: do not resubmit */
break;
default:
dbgcm("comedi#: vmk80xx: %s - nonzero urb status (%d)\n",
__func__, stat);
goto resubmit;
}
goto exit;
resubmit:
if (test_bit(TRANS_IN_RUNNING, &dev->flags) && dev->intf) {
usb_anchor_urb(urb, &dev->rx_anchor);
if (!usb_submit_urb(urb, GFP_KERNEL))
goto exit;
err("comedi#: vmk80xx: %s - submit urb failed\n", __func__);
usb_unanchor_urb(urb);
}
exit:
clear_bit(TRANS_IN_BUSY, &dev->flags);
wake_up_interruptible(&dev->read_wait);
}
/*
 * Query IC6 (PIC16F871) power/link status over the K8061 bulk endpoints.
 *
 * Returns the second status byte reported by the board, or 0 when either
 * bulk transfer fails (treated as "data link not working").  The original
 * code ignored both usb_bulk_msg() return values and would then read an
 * uninitialized stack byte from rx[1].
 */
static int vmk80xx_check_data_link(struct vmk80xx_usb *dev)
{
	unsigned int tx_pipe;
	unsigned int rx_pipe;
	unsigned char tx[1];
	unsigned char rx[2] = { 0, 0 };

	dbgvm("vmk80xx: %s\n", __func__);

	tx_pipe = usb_sndbulkpipe(dev->udev, 0x01);
	rx_pipe = usb_rcvbulkpipe(dev->udev, 0x81);

	tx[0] = VMK8061_CMD_RD_PWR_STAT;

	/*
	 * Check that IC6 (PIC16F871) is powered and
	 * running and the data link between IC3 and
	 * IC6 is working properly
	 */
	if (usb_bulk_msg(dev->udev, tx_pipe, tx, 1, NULL,
			 dev->ep_tx->bInterval))
		return 0;
	if (usb_bulk_msg(dev->udev, rx_pipe, rx, 2, NULL, HZ * 10))
		return 0;

	return (int)rx[1];
}
/*
 * Read the IC3 (USB controller) or IC6 (CPU) firmware version string
 * from the board EEPROM into dev->fw, selected by @flag (IC3_VERSION /
 * IC6_VERSION).
 *
 * Fixes vs. the original: 'cnt' was used uninitialized when the reply
 * transfer failed, and 'rx[cnt]' could write one byte past the 64-byte
 * buffer when a full-size 64-byte packet was received.
 */
static void vmk80xx_read_eeprom(struct vmk80xx_usb *dev, int flag)
{
	unsigned int tx_pipe;
	unsigned int rx_pipe;
	unsigned char tx[1];
	unsigned char rx[64];
	int cnt = 0;

	dbgvm("vmk80xx: %s\n", __func__);

	tx_pipe = usb_sndbulkpipe(dev->udev, 0x01);
	rx_pipe = usb_rcvbulkpipe(dev->udev, 0x81);

	tx[0] = VMK8061_CMD_RD_VERSION;

	/*
	 * Read the firmware version info of IC3 and
	 * IC6 from the internal EEPROM of the IC
	 */
	usb_bulk_msg(dev->udev, tx_pipe, tx, 1, NULL, dev->ep_tx->bInterval);
	if (usb_bulk_msg(dev->udev, rx_pipe, rx, 64, &cnt, HZ * 10))
		return;		/* no reply: leave dev->fw untouched */

	/* keep the terminator inside the buffer */
	if (cnt < 0)
		cnt = 0;
	else if (cnt > (int)sizeof(rx) - 1)
		cnt = sizeof(rx) - 1;
	rx[cnt] = '\0';

	if (flag & IC3_VERSION)
		strncpy(dev->fw.ic3_vers, rx + 1, 24);
	else			/* IC6_VERSION */
		strncpy(dev->fw.ic6_vers, rx + 25, 24);
}
/*
 * Send the K8055 reset command (VMK8055_CMD_RST plus seven zero
 * parameter bytes) over the interrupt-out endpoint.
 *
 * Fix vs. the original: the allocated URB was never freed (leak on
 * every call) and stayed anchored when usb_submit_urb() failed.  This
 * now mirrors the unanchor/usb_free_urb pattern used by
 * vmk80xx_read_packet()/vmk80xx_write_packet(); the anchor holds its
 * own reference until the transfer completes.
 */
static int vmk80xx_reset_device(struct vmk80xx_usb *dev)
{
	struct urb *urb;
	unsigned int tx_pipe;
	int ival;
	size_t size;
	int retval;

	dbgvm("vmk80xx: %s\n", __func__);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	tx_pipe = usb_sndintpipe(dev->udev, 0x01);
	ival = dev->ep_tx->bInterval;
	size = le16_to_cpu(dev->ep_tx->wMaxPacketSize);

	dev->usb_tx_buf[0] = VMK8055_CMD_RST;
	dev->usb_tx_buf[1] = 0x00;
	dev->usb_tx_buf[2] = 0x00;
	dev->usb_tx_buf[3] = 0x00;
	dev->usb_tx_buf[4] = 0x00;
	dev->usb_tx_buf[5] = 0x00;
	dev->usb_tx_buf[6] = 0x00;
	dev->usb_tx_buf[7] = 0x00;

	usb_fill_int_urb(urb, dev->udev, tx_pipe, dev->usb_tx_buf,
			 size, vmk80xx_tx_callback, dev, ival);

	usb_anchor_urb(urb, &dev->tx_anchor);

	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval)
		usb_unanchor_urb(urb);

	usb_free_urb(urb);	/* drop our reference; anchor keeps it alive */

	return retval;
}
/*
 * Fill @urb as an interrupt URB for either the receive (URB_RCV_FLAG)
 * or transmit direction, using the endpoint descriptors and buffers
 * stored in the device (urb->context must already point at it).
 */
static void vmk80xx_build_int_urb(struct urb *urb, int flag)
{
struct vmk80xx_usb *dev = urb->context;
__u8 rx_addr;
__u8 tx_addr;
unsigned int pipe;
unsigned char *buf;
size_t size;
void (*callback) (struct urb *);
int ival;
dbgvm("vmk80xx: %s\n", __func__);
if (flag & URB_RCV_FLAG) {
rx_addr = dev->ep_rx->bEndpointAddress;
pipe = usb_rcvintpipe(dev->udev, rx_addr);
buf = dev->usb_rx_buf;
size = le16_to_cpu(dev->ep_rx->wMaxPacketSize);
callback = vmk80xx_rx_callback;
ival = dev->ep_rx->bInterval;
} else { /* URB_SND_FLAG */
tx_addr = dev->ep_tx->bEndpointAddress;
pipe = usb_sndintpipe(dev->udev, tx_addr);
buf = dev->usb_tx_buf;
size = le16_to_cpu(dev->ep_tx->wMaxPacketSize);
callback = vmk80xx_tx_callback;
ival = dev->ep_tx->bInterval;
}
usb_fill_int_urb(urb, dev->udev, pipe, buf, size, callback, dev, ival);
}
/*
 * Synchronous command/response exchange for the K8061: send the
 * prepared usb_tx_buf over the bulk-out endpoint, then read the reply
 * into usb_rx_buf.  The TRANS_*_BUSY bits bracket the exchange so the
 * insn handlers' rudimentary_check() sees the device as busy.
 * NOTE(review): both usb_bulk_msg() return values are ignored here;
 * callers cannot distinguish a failed exchange from a successful one.
 */
static void vmk80xx_do_bulk_msg(struct vmk80xx_usb *dev)
{
__u8 tx_addr;
__u8 rx_addr;
unsigned int tx_pipe;
unsigned int rx_pipe;
size_t size;
dbgvm("vmk80xx: %s\n", __func__);
set_bit(TRANS_IN_BUSY, &dev->flags);
set_bit(TRANS_OUT_BUSY, &dev->flags);
tx_addr = dev->ep_tx->bEndpointAddress;
rx_addr = dev->ep_rx->bEndpointAddress;
tx_pipe = usb_sndbulkpipe(dev->udev, tx_addr);
rx_pipe = usb_rcvbulkpipe(dev->udev, rx_addr);
/*
 * The max packet size attributes of the K8061
 * input/output endpoints are identical
 */
size = le16_to_cpu(dev->ep_tx->wMaxPacketSize);
usb_bulk_msg(dev->udev, tx_pipe, dev->usb_tx_buf,
size, NULL, dev->ep_tx->bInterval);
usb_bulk_msg(dev->udev, rx_pipe, dev->usb_rx_buf, size, NULL, HZ * 10);
clear_bit(TRANS_OUT_BUSY, &dev->flags);
clear_bit(TRANS_IN_BUSY, &dev->flags);
}
/*
 * Fetch one packet into dev->usb_rx_buf.  For the K8061 this is a
 * synchronous bulk exchange; for the K8055 an interrupt-in URB is
 * submitted (completion handled by vmk80xx_rx_callback).  Waits
 * interruptibly for any in-flight receive to finish first.
 * Returns 0 on success / successful submit, negative errno otherwise.
 */
static int vmk80xx_read_packet(struct vmk80xx_usb *dev)
{
struct urb *urb;
int retval;
dbgvm("vmk80xx: %s\n", __func__);
if (!dev->intf)
return -ENODEV;
/* Only useful for interrupt transfers */
if (test_bit(TRANS_IN_BUSY, &dev->flags))
if (wait_event_interruptible(dev->read_wait,
!test_bit(TRANS_IN_BUSY,
&dev->flags)))
return -ERESTART;
if (dev->board.model == VMK8061_MODEL) {
vmk80xx_do_bulk_msg(dev);
return 0;
}
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
urb->context = dev;
vmk80xx_build_int_urb(urb, URB_RCV_FLAG);
set_bit(TRANS_IN_RUNNING, &dev->flags);
set_bit(TRANS_IN_BUSY, &dev->flags);
usb_anchor_urb(urb, &dev->rx_anchor);
retval = usb_submit_urb(urb, GFP_KERNEL);
if (!retval)
goto exit;
/* submit failed: undo the running flag and drop the anchor ref */
clear_bit(TRANS_IN_RUNNING, &dev->flags);
usb_unanchor_urb(urb);
exit:
usb_free_urb(urb);
return retval;
}
/*
 * Send dev->usb_tx_buf with command byte @cmd in position 0.  K8061:
 * synchronous bulk exchange; K8055: interrupt-out URB completed by
 * vmk80xx_tx_callback.  Waits interruptibly for any in-flight send.
 * Returns 0 on success / successful submit, negative errno otherwise.
 */
static int vmk80xx_write_packet(struct vmk80xx_usb *dev, int cmd)
{
struct urb *urb;
int retval;
dbgvm("vmk80xx: %s\n", __func__);
if (!dev->intf)
return -ENODEV;
if (test_bit(TRANS_OUT_BUSY, &dev->flags))
if (wait_event_interruptible(dev->write_wait,
!test_bit(TRANS_OUT_BUSY,
&dev->flags)))
return -ERESTART;
if (dev->board.model == VMK8061_MODEL) {
dev->usb_tx_buf[0] = cmd;
vmk80xx_do_bulk_msg(dev);
return 0;
}
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
urb->context = dev;
vmk80xx_build_int_urb(urb, URB_SND_FLAG);
set_bit(TRANS_OUT_BUSY, &dev->flags);
usb_anchor_urb(urb, &dev->tx_anchor);
dev->usb_tx_buf[0] = cmd;
retval = usb_submit_urb(urb, GFP_KERNEL);
if (!retval)
goto exit;
/* submit failed: clear the busy flag and drop the anchor ref */
clear_bit(TRANS_OUT_BUSY, &dev->flags);
usb_unanchor_urb(urb);
exit:
usb_free_urb(urb);
return retval;
}
#define DIR_IN 1
#define DIR_OUT 2
static int rudimentary_check(struct vmk80xx_usb *dev, int dir)
{
if (!dev)
return -EFAULT;
if (!dev->probed)
return -ENODEV;
if (!dev->attached)
return -ENODEV;
if (dir & DIR_IN) {
if (test_bit(TRANS_IN_BUSY, &dev->flags))
return -EBUSY;
}
if (dir & DIR_OUT) {
if (test_bit(TRANS_OUT_BUSY, &dev->flags))
return -EBUSY;
}
return 0;
}
/*
 * Comedi analog-input read.  K8055: single 8-bit register per channel.
 * K8061: issues VMK8061_CMD_RD_AI for the selected channel and combines
 * two reply bytes (little-endian, low + 256*high) into the sample.
 * Returns the number of samples read.
 */
static int vmk80xx_ai_rinsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
int chan;
int reg[2];
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_IN);
if (n)
return n;
down(&dev->limit_sem);
chan = CR_CHAN(insn->chanspec);
switch (dev->board.model) {
case VMK8055_MODEL:
if (!chan)
reg[0] = VMK8055_AI1_REG;
else
reg[0] = VMK8055_AI2_REG;
break;
case VMK8061_MODEL:
/* reg[1] is only set (and only used) on this model */
reg[0] = VMK8061_AI_REG1;
reg[1] = VMK8061_AI_REG2;
dev->usb_tx_buf[0] = VMK8061_CMD_RD_AI;
dev->usb_tx_buf[VMK8061_CH_REG] = chan;
break;
}
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
if (dev->board.model == VMK8055_MODEL) {
data[n] = dev->usb_rx_buf[reg[0]];
continue;
}
/* VMK8061_MODEL */
data[n] = dev->usb_rx_buf[reg[0]] + 256 *
dev->usb_rx_buf[reg[1]];
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi analog-output write: store each sample in the model-specific
 * register of usb_tx_buf and send it with the matching command.
 * Returns the number of samples written.
 */
static int vmk80xx_ao_winsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
int chan;
int cmd;
int reg;
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_OUT);
if (n)
return n;
down(&dev->limit_sem);
chan = CR_CHAN(insn->chanspec);
switch (dev->board.model) {
case VMK8055_MODEL:
cmd = VMK8055_CMD_WRT_AD;
if (!chan)
reg = VMK8055_AO1_REG;
else
reg = VMK8055_AO2_REG;
break;
default: /* NOTE: avoid compiler warnings */
cmd = VMK8061_CMD_SET_AO;
reg = VMK8061_AO_REG;
dev->usb_tx_buf[VMK8061_CH_REG] = chan;
break;
}
for (n = 0; n < insn->n; n++) {
dev->usb_tx_buf[reg] = data[n];
if (vmk80xx_write_packet(dev, cmd))
break;
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi analog-output read-back (K8061 only; only wired up for that
 * model in vmk80xx_attach).  Issues VMK8061_CMD_RD_AO and indexes the
 * reply by channel.  Returns the number of samples read.
 */
static int vmk80xx_ao_rinsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
int chan;
int reg;
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_IN);
if (n)
return n;
down(&dev->limit_sem);
chan = CR_CHAN(insn->chanspec);
/* reply holds one byte per AO channel starting at VMK8061_AO_REG - 1 */
reg = VMK8061_AO_REG - 1;
dev->usb_tx_buf[0] = VMK8061_CMD_RD_AO;
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
data[n] = dev->usb_rx_buf[reg + chan];
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi digital-input bits read: fetch all input lines at once.
 * The K8055 scatters its five inputs across the status byte, so the
 * bits are reassembled into a contiguous 5-bit value; the K8061
 * reply byte is used as-is.  Returns 2 (insn data length) on success.
 */
static int vmk80xx_di_bits(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
unsigned char *rx_buf;
int reg;
int retval;
dbgvm("vmk80xx: %s\n", __func__);
retval = rudimentary_check(dev, DIR_IN);
if (retval)
return retval;
down(&dev->limit_sem);
rx_buf = dev->usb_rx_buf;
if (dev->board.model == VMK8061_MODEL) {
reg = VMK8061_DI_REG;
dev->usb_tx_buf[0] = VMK8061_CMD_RD_DI;
} else {
reg = VMK8055_DI_REG;
}
retval = vmk80xx_read_packet(dev);
if (!retval) {
if (dev->board.model == VMK8055_MODEL)
/* remap hardware bit positions to channels 0..4 */
data[1] = (((rx_buf[reg] >> 4) & 0x03) |
((rx_buf[reg] << 2) & 0x04) |
((rx_buf[reg] >> 3) & 0x18));
else
data[1] = rx_buf[reg];
retval = 2;
}
up(&dev->limit_sem);
return retval;
}
/*
 * Comedi digital-input single-channel read: same bit remapping as
 * vmk80xx_di_bits(), then extracts the requested channel bit.
 * Returns the number of samples read.
 */
static int vmk80xx_di_rinsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
int chan;
unsigned char *rx_buf;
int reg;
int inp;
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_IN);
if (n)
return n;
down(&dev->limit_sem);
chan = CR_CHAN(insn->chanspec);
rx_buf = dev->usb_rx_buf;
if (dev->board.model == VMK8061_MODEL) {
reg = VMK8061_DI_REG;
dev->usb_tx_buf[0] = VMK8061_CMD_RD_DI;
} else {
reg = VMK8055_DI_REG;
}
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
if (dev->board.model == VMK8055_MODEL)
/* remap hardware bit positions to channels 0..4 */
inp = (((rx_buf[reg] >> 4) & 0x03) |
((rx_buf[reg] << 2) & 0x04) |
((rx_buf[reg] >> 3) & 0x18));
else
inp = rx_buf[reg];
data[n] = (inp >> chan) & 1;
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi digital-output single-channel write.
 *
 * Fix vs. the original: the K8055 "clear" path used
 * 'tx_buf[reg] ^= (1 << chan)', which *toggles* the bit and would turn
 * an already-cleared output ON when writing 0.  It now uses
 * '&= ~(1 << chan)' to actually clear the bit.  Returns the number of
 * samples written.
 */
static int vmk80xx_do_winsn(struct comedi_device *cdev,
			    struct comedi_subdevice *s,
			    struct comedi_insn *insn, unsigned int *data)
{
	struct vmk80xx_usb *dev = cdev->private;
	int chan;
	unsigned char *tx_buf;
	int reg;
	int cmd;
	int n;

	dbgvm("vmk80xx: %s\n", __func__);

	n = rudimentary_check(dev, DIR_OUT);
	if (n)
		return n;

	down(&dev->limit_sem);
	chan = CR_CHAN(insn->chanspec);

	tx_buf = dev->usb_tx_buf;

	for (n = 0; n < insn->n; n++) {
		if (dev->board.model == VMK8055_MODEL) {
			reg = VMK8055_DO_REG;
			cmd = VMK8055_CMD_WRT_AD;
			if (data[n] == 1)
				tx_buf[reg] |= (1 << chan);
			else
				tx_buf[reg] &= ~(1 << chan);
		} else {	/* VMK8061_MODEL */
			reg = VMK8061_DO_REG;
			if (data[n] == 1) {
				cmd = VMK8061_CMD_SET_DO;
				tx_buf[reg] = 1 << chan;
			} else {
				cmd = VMK8061_CMD_CLR_DO;
				tx_buf[reg] = 0xff - (1 << chan);
			}
		}

		if (vmk80xx_write_packet(dev, cmd))
			break;
	}

	up(&dev->limit_sem);
	return n;
}
/*
 * Comedi digital-output read-back (K8061 only; only wired up for that
 * model in vmk80xx_attach).  Issues VMK8061_CMD_RD_DO and extracts the
 * requested channel bit.  Returns the number of samples read.
 */
static int vmk80xx_do_rinsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
int chan;
int reg;
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_IN);
if (n)
return n;
down(&dev->limit_sem);
chan = CR_CHAN(insn->chanspec);
reg = VMK8061_DO_REG;
dev->usb_tx_buf[0] = VMK8061_CMD_RD_DO;
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
data[n] = (dev->usb_rx_buf[reg] >> chan) & 1;
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi digital-output bits handler: optionally update the masked
 * output bits (data[0] = mask, data[1] = values), then report the
 * current output state back in data[1].
 *
 * Fix vs. the original: when data[0] == 0 on a K8055, 'reg' was never
 * assigned before the read-back 'data[1] = tx_buf[reg]' -- an
 * uninitialized-variable read.  The register/command pair is now
 * selected up front for both models.
 */
static int vmk80xx_do_bits(struct comedi_device *cdev,
			   struct comedi_subdevice *s,
			   struct comedi_insn *insn, unsigned int *data)
{
	struct vmk80xx_usb *dev = cdev->private;
	unsigned char *rx_buf, *tx_buf;
	int dir, reg, cmd;
	int retval;

	dbgvm("vmk80xx: %s\n", __func__);

	dir = 0;
	if (data[0])
		dir |= DIR_OUT;
	if (dev->board.model == VMK8061_MODEL)
		dir |= DIR_IN;

	retval = rudimentary_check(dev, dir);
	if (retval)
		return retval;

	down(&dev->limit_sem);

	rx_buf = dev->usb_rx_buf;
	tx_buf = dev->usb_tx_buf;

	/* select register/command up front so reg is always valid */
	if (dev->board.model == VMK8055_MODEL) {
		reg = VMK8055_DO_REG;
		cmd = VMK8055_CMD_WRT_AD;
	} else {		/* VMK8061_MODEL */
		reg = VMK8061_DO_REG;
		cmd = VMK8061_CMD_DO;
	}

	if (data[0]) {
		tx_buf[reg] &= ~data[0];
		tx_buf[reg] |= (data[0] & data[1]);

		retval = vmk80xx_write_packet(dev, cmd);
		if (retval)
			goto out;
	}

	if (dev->board.model == VMK8061_MODEL) {
		tx_buf[0] = VMK8061_CMD_RD_DO;
		retval = vmk80xx_read_packet(dev);
		if (!retval) {
			data[1] = rx_buf[reg];
			retval = 2;
		}
	} else {
		/* K8055 outputs are write-only; report the shadow byte */
		data[1] = tx_buf[reg];
		retval = 2;
	}
out:
	up(&dev->limit_sem);
	return retval;
}
/*
 * Comedi counter read.  K8055: one 8-bit register per counter.
 * K8061: issues VMK8061_CMD_RD_CNT and combines two reply bytes.
 * NOTE(review): the K8061 index arithmetic (reg[0]*(chan+1)+1 and
 * reg[1]*2+2) mixes chan-dependent and fixed offsets -- verify against
 * the board protocol.  Returns the number of samples read.
 */
static int vmk80xx_cnt_rinsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
int chan;
int reg[2];
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_IN);
if (n)
return n;
down(&dev->limit_sem);
chan = CR_CHAN(insn->chanspec);
switch (dev->board.model) {
case VMK8055_MODEL:
if (!chan)
reg[0] = VMK8055_CNT1_REG;
else
reg[0] = VMK8055_CNT2_REG;
break;
case VMK8061_MODEL:
reg[0] = VMK8061_CNT_REG;
reg[1] = VMK8061_CNT_REG;
dev->usb_tx_buf[0] = VMK8061_CMD_RD_CNT;
break;
}
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
if (dev->board.model == VMK8055_MODEL)
data[n] = dev->usb_rx_buf[reg[0]];
else /* VMK8061_MODEL */
data[n] = dev->usb_rx_buf[reg[0] * (chan + 1) + 1]
+ 256 * dev->usb_rx_buf[reg[1] * 2 + 2];
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi counter config: only reset (INSN_CONFIG_RESET / GPCT_RESET)
 * is supported.
 *
 * Fix vs. the original: the -EINVAL path returned while still holding
 * limit_sem (taken just above), permanently leaking one semaphore
 * count per bad request.  The command is now validated before the
 * semaphore is acquired.  Returns the number of resets performed.
 */
static int vmk80xx_cnt_cinsn(struct comedi_device *cdev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data)
{
	struct vmk80xx_usb *dev = cdev->private;
	unsigned int insn_cmd;
	int chan;
	int cmd;
	int reg;
	int n;

	dbgvm("vmk80xx: %s\n", __func__);

	n = rudimentary_check(dev, DIR_OUT);
	if (n)
		return n;

	insn_cmd = data[0];
	if (insn_cmd != INSN_CONFIG_RESET && insn_cmd != GPCT_RESET)
		return -EINVAL;	/* validated before taking limit_sem */

	down(&dev->limit_sem);

	chan = CR_CHAN(insn->chanspec);
	if (dev->board.model == VMK8055_MODEL) {
		if (!chan) {
			cmd = VMK8055_CMD_RST_CNT1;
			reg = VMK8055_CNT1_REG;
		} else {
			cmd = VMK8055_CMD_RST_CNT2;
			reg = VMK8055_CNT2_REG;
		}

		dev->usb_tx_buf[reg] = 0x00;
	} else {
		cmd = VMK8061_CMD_RST_CNT;
	}

	for (n = 0; n < insn->n; n++)
		if (vmk80xx_write_packet(dev, cmd))
			break;

	up(&dev->limit_sem);
	return n;
}
/*
 * Comedi counter write (K8055 only; only the K8055 gets insn_write in
 * vmk80xx_attach): programs the debounce time for counter @chan.
 * The requested time (ms) is clamped to [1, 7450] and converted to the
 * register value via val ~= sqrt(time * 1000 / 115), rounded up.
 * Returns the number of samples written.
 */
static int vmk80xx_cnt_winsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
unsigned long debtime;
unsigned long val;
int chan;
int cmd;
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_OUT);
if (n)
return n;
down(&dev->limit_sem);
chan = CR_CHAN(insn->chanspec);
if (!chan)
cmd = VMK8055_CMD_DEB1_TIME;
else
cmd = VMK8055_CMD_DEB2_TIME;
for (n = 0; n < insn->n; n++) {
debtime = data[n];
if (debtime == 0)
debtime = 1;
/* TODO: Prevent overflows */
if (debtime > 7450)
debtime = 7450;
val = int_sqrt(debtime * 1000 / 115);
/* round the integer square root up */
if (((val + 1) * val) < debtime * 1000 / 115)
val += 1;
dev->usb_tx_buf[6 + chan] = val;
if (vmk80xx_write_packet(dev, cmd))
break;
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi PWM read (K8061 only): issues VMK8061_CMD_RD_PWM and combines
 * the 2-bit low register with the high register (low + 4*high),
 * mirroring the split used by vmk80xx_pwm_winsn().  Returns the number
 * of samples read.
 */
static int vmk80xx_pwm_rinsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
int reg[2];
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_IN);
if (n)
return n;
down(&dev->limit_sem);
reg[0] = VMK8061_PWM_REG1;
reg[1] = VMK8061_PWM_REG2;
dev->usb_tx_buf[0] = VMK8061_CMD_RD_PWM;
for (n = 0; n < insn->n; n++) {
if (vmk80xx_read_packet(dev))
break;
data[n] = dev->usb_rx_buf[reg[0]] + 4 * dev->usb_rx_buf[reg[1]];
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi PWM write (K8061 only): splits each sample into a 2-bit low
 * part and the remaining high bits (see the translated assembler note
 * below) and sends them with VMK8061_CMD_OUT_PWM.  Returns the number
 * of samples written.
 */
static int vmk80xx_pwm_winsn(struct comedi_device *cdev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct vmk80xx_usb *dev = cdev->private;
unsigned char *tx_buf;
int reg[2];
int cmd;
int n;
dbgvm("vmk80xx: %s\n", __func__);
n = rudimentary_check(dev, DIR_OUT);
if (n)
return n;
down(&dev->limit_sem);
tx_buf = dev->usb_tx_buf;
reg[0] = VMK8061_PWM_REG1;
reg[1] = VMK8061_PWM_REG2;
cmd = VMK8061_CMD_OUT_PWM;
/*
 * The followin piece of code was translated from the inline
 * assembler code in the DLL source code.
 *
 * asm
 * mov eax, k ; k is the value (data[n])
 * and al, 03h ; al are the lower 8 bits of eax
 * mov lo, al ; lo is the low part (tx_buf[reg[0]])
 * mov eax, k
 * shr eax, 2 ; right shift eax register by 2
 * mov hi, al ; hi is the high part (tx_buf[reg[1]])
 * end;
 */
for (n = 0; n < insn->n; n++) {
tx_buf[reg[0]] = (unsigned char)(data[n] & 0x03);
tx_buf[reg[1]] = (unsigned char)(data[n] >> 2) & 0xff;
if (vmk80xx_write_packet(dev, cmd))
break;
}
up(&dev->limit_sem);
return n;
}
/*
 * Comedi attach: bind a previously-probed-but-unattached board slot to
 * the comedi device, allocate the subdevices (5 for K8055, 6 with PWM
 * for K8061) and wire up the insn handlers per model.  Returns 0 on
 * success, -ENODEV when no free probed board exists, -ENOMEM on
 * subdevice allocation failure.
 */
static int vmk80xx_attach(struct comedi_device *cdev,
struct comedi_devconfig *it)
{
int i;
struct vmk80xx_usb *dev;
int n_subd;
struct comedi_subdevice *s;
int minor;
dbgvm("vmk80xx: %s\n", __func__);
mutex_lock(&glb_mutex);
/* find the first probed board not yet claimed by comedi */
for (i = 0; i < VMK80XX_MAX_BOARDS; i++)
if (vmb[i].probed && !vmb[i].attached)
break;
if (i == VMK80XX_MAX_BOARDS) {
mutex_unlock(&glb_mutex);
return -ENODEV;
}
dev = &vmb[i];
down(&dev->limit_sem);
cdev->board_name = dev->board.name;
cdev->private = dev;
if (dev->board.model == VMK8055_MODEL)
n_subd = 5;
else
n_subd = 6;
if (alloc_subdevices(cdev, n_subd) < 0) {
up(&dev->limit_sem);
mutex_unlock(&glb_mutex);
return -ENOMEM;
}
/* Analog input subdevice */
s = cdev->subdevices + VMK80XX_SUBD_AI;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = dev->board.ai_chans;
s->maxdata = (1 << dev->board.ai_bits) - 1;
s->range_table = dev->board.range;
s->insn_read = vmk80xx_ai_rinsn;
/* Analog output subdevice */
s = cdev->subdevices + VMK80XX_SUBD_AO;
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
s->n_chan = dev->board.ao_chans;
s->maxdata = (1 << dev->board.ao_bits) - 1;
s->range_table = dev->board.range;
s->insn_write = vmk80xx_ao_winsn;
if (dev->board.model == VMK8061_MODEL) {
/* only the K8061 can read its AO registers back */
s->subdev_flags |= SDF_READABLE;
s->insn_read = vmk80xx_ao_rinsn;
}
/* Digital input subdevice */
s = cdev->subdevices + VMK80XX_SUBD_DI;
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = dev->board.di_chans;
s->maxdata = 1;
s->insn_read = vmk80xx_di_rinsn;
s->insn_bits = vmk80xx_di_bits;
/* Digital output subdevice */
s = cdev->subdevices + VMK80XX_SUBD_DO;
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITEABLE | SDF_GROUND;
s->n_chan = dev->board.do_chans;
s->maxdata = 1;
s->insn_write = vmk80xx_do_winsn;
s->insn_bits = vmk80xx_do_bits;
if (dev->board.model == VMK8061_MODEL) {
/* only the K8061 can read its DO registers back */
s->subdev_flags |= SDF_READABLE;
s->insn_read = vmk80xx_do_rinsn;
}
/* Counter subdevice */
s = cdev->subdevices + VMK80XX_SUBD_CNT;
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE;
s->n_chan = dev->board.cnt_chans;
s->insn_read = vmk80xx_cnt_rinsn;
s->insn_config = vmk80xx_cnt_cinsn;
if (dev->board.model == VMK8055_MODEL) {
/* K8055 counters additionally support debounce-time writes */
s->subdev_flags |= SDF_WRITEABLE;
s->maxdata = (1 << dev->board.cnt_bits) - 1;
s->insn_write = vmk80xx_cnt_winsn;
}
/* PWM subdevice */
if (dev->board.model == VMK8061_MODEL) {
s = cdev->subdevices + VMK80XX_SUBD_PWM;
s->type = COMEDI_SUBD_PWM;
s->subdev_flags = SDF_READABLE | SDF_WRITEABLE;
s->n_chan = dev->board.pwm_chans;
s->maxdata = (1 << dev->board.pwm_bits) - 1;
s->insn_read = vmk80xx_pwm_rinsn;
s->insn_write = vmk80xx_pwm_winsn;
}
dev->attached = 1;
minor = cdev->minor;
printk(KERN_INFO
"comedi%d: vmk80xx: board #%d [%s] attached to comedi\n",
minor, dev->count, dev->board.name);
up(&dev->limit_sem);
mutex_unlock(&glb_mutex);
return 0;
}
/*
 * Comedi detach: unbind the board slot from the comedi device so a
 * subsequent attach can claim it again.  The USB-side state (buffers,
 * endpoints) stays alive; it belongs to the USB probe/disconnect path.
 */
static int vmk80xx_detach(struct comedi_device *cdev)
{
struct vmk80xx_usb *dev;
int minor;
dbgvm("vmk80xx: %s\n", __func__);
if (!cdev)
return -EFAULT;
dev = cdev->private;
if (!dev)
return -EFAULT;
down(&dev->limit_sem);
cdev->private = NULL;
dev->attached = 0;
minor = cdev->minor;
printk(KERN_INFO
"comedi%d: vmk80xx: board #%d [%s] detached from comedi\n",
minor, dev->count, dev->board.name);
up(&dev->limit_sem);
return 0;
}
/*
 * USB probe handler: claim a free slot in the global board table,
 * locate the interface's IN/OUT endpoints (interrupt or bulk),
 * allocate the USB transfer buffers and fill in the per-model board
 * description.
 *
 * Fix: reject endpoints that report a zero wMaxPacketSize.  The
 * original passed the unchecked size straight to kmalloc(), which
 * returns ZERO_SIZE_PTR for 0; that pointer would later be handed to
 * the USB core as a transfer buffer.
 *
 * Returns 0 on success, -EMFILE when every board slot is in use,
 * -ENOMEM on allocation failure, -ENODEV for unusable interfaces.
 */
static int vmk80xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int i;
	struct vmk80xx_usb *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *ep_desc;
	size_t size;
	dbgvm("vmk80xx: %s\n", __func__);
	mutex_lock(&glb_mutex);
	/* pick the first free slot in the static board table */
	for (i = 0; i < VMK80XX_MAX_BOARDS; i++)
		if (!vmb[i].probed)
			break;
	if (i == VMK80XX_MAX_BOARDS) {
		mutex_unlock(&glb_mutex);
		return -EMFILE;
	}
	dev = &vmb[i];
	memset(dev, 0x00, sizeof(struct vmk80xx_usb));
	dev->count = i;
	iface_desc = intf->cur_altsetting;
	if (iface_desc->desc.bNumEndpoints != 2)
		goto error;
	/* find one receive and one transmit endpoint (int or bulk) */
	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
		ep_desc = &iface_desc->endpoint[i].desc;
		if (usb_endpoint_is_int_in(ep_desc)) {
			dev->ep_rx = ep_desc;
			continue;
		}
		if (usb_endpoint_is_int_out(ep_desc)) {
			dev->ep_tx = ep_desc;
			continue;
		}
		if (usb_endpoint_is_bulk_in(ep_desc)) {
			dev->ep_rx = ep_desc;
			continue;
		}
		if (usb_endpoint_is_bulk_out(ep_desc)) {
			dev->ep_tx = ep_desc;
			continue;
		}
	}
	if (!dev->ep_rx || !dev->ep_tx)
		goto error;
	/* a broken or malicious device may advertise zero-sized endpoints */
	if (!le16_to_cpu(dev->ep_rx->wMaxPacketSize) ||
	    !le16_to_cpu(dev->ep_tx->wMaxPacketSize))
		goto error;
	size = le16_to_cpu(dev->ep_rx->wMaxPacketSize);
	dev->usb_rx_buf = kmalloc(size, GFP_KERNEL);
	if (!dev->usb_rx_buf) {
		mutex_unlock(&glb_mutex);
		return -ENOMEM;
	}
	size = le16_to_cpu(dev->ep_tx->wMaxPacketSize);
	dev->usb_tx_buf = kmalloc(size, GFP_KERNEL);
	if (!dev->usb_tx_buf) {
		kfree(dev->usb_rx_buf);
		mutex_unlock(&glb_mutex);
		return -ENOMEM;
	}
	dev->udev = interface_to_usbdev(intf);
	dev->intf = intf;
	sema_init(&dev->limit_sem, 8);
	init_waitqueue_head(&dev->read_wait);
	init_waitqueue_head(&dev->write_wait);
	init_usb_anchor(&dev->rx_anchor);
	init_usb_anchor(&dev->tx_anchor);
	usb_set_intfdata(intf, dev);
	/* per-model channel counts and resolutions */
	switch (id->driver_info) {
	case DEVICE_VMK8055:
		dev->board.name = "K8055 (VM110)";
		dev->board.model = VMK8055_MODEL;
		dev->board.range = &vmk8055_range;
		dev->board.ai_chans = 2;
		dev->board.ai_bits = 8;
		dev->board.ao_chans = 2;
		dev->board.ao_bits = 8;
		dev->board.di_chans = 5;
		dev->board.di_bits = 1;
		dev->board.do_chans = 8;
		dev->board.do_bits = 1;
		dev->board.cnt_chans = 2;
		dev->board.cnt_bits = 16;
		dev->board.pwm_chans = 0;
		dev->board.pwm_bits = 0;
		break;
	case DEVICE_VMK8061:
		dev->board.name = "K8061 (VM140)";
		dev->board.model = VMK8061_MODEL;
		dev->board.range = &vmk8061_range;
		dev->board.ai_chans = 8;
		dev->board.ai_bits = 10;
		dev->board.ao_chans = 8;
		dev->board.ao_bits = 8;
		dev->board.di_chans = 8;
		dev->board.di_bits = 1;
		dev->board.do_chans = 8;
		dev->board.do_bits = 1;
		dev->board.cnt_chans = 2;
		dev->board.cnt_bits = 0;
		dev->board.pwm_chans = 1;
		dev->board.pwm_bits = 10;
		break;
	}
	/* the K8061 exposes firmware versions via its EEPROM */
	if (dev->board.model == VMK8061_MODEL) {
		vmk80xx_read_eeprom(dev, IC3_VERSION);
		printk(KERN_INFO "comedi#: vmk80xx: %s\n", dev->fw.ic3_vers);
		if (vmk80xx_check_data_link(dev)) {
			vmk80xx_read_eeprom(dev, IC6_VERSION);
			printk(KERN_INFO "comedi#: vmk80xx: %s\n",
			       dev->fw.ic6_vers);
		} else {
			dbgcm("comedi#: vmk80xx: no conn. to CPU\n");
		}
	}
	if (dev->board.model == VMK8055_MODEL)
		vmk80xx_reset_device(dev);
	dev->probed = 1;
	printk(KERN_INFO "comedi#: vmk80xx: board #%d [%s] now attached\n",
	       dev->count, dev->board.name);
	mutex_unlock(&glb_mutex);
	comedi_usb_auto_config(dev->udev, BOARDNAME);
	return 0;
error:
	mutex_unlock(&glb_mutex);
	return -ENODEV;
}
/*
 * USB disconnect handler: kill any outstanding URBs, free the transfer
 * buffers and mark the board slot free again.  Lock order is
 * glb_mutex then limit_sem, matching the probe/attach paths.
 */
static void vmk80xx_disconnect(struct usb_interface *intf)
{
	struct vmk80xx_usb *dev = usb_get_intfdata(intf);
	dbgvm("vmk80xx: %s\n", __func__);
	if (!dev)
		return;
	comedi_usb_auto_unconfig(dev->udev);
	mutex_lock(&glb_mutex);
	down(&dev->limit_sem);
	dev->probed = 0;	/* slot may be reused by a later probe */
	usb_set_intfdata(dev->intf, NULL);
	/* cancel all in-flight transfers before freeing their buffers */
	usb_kill_anchored_urbs(&dev->rx_anchor);
	usb_kill_anchored_urbs(&dev->tx_anchor);
	kfree(dev->usb_rx_buf);
	kfree(dev->usb_tx_buf);
	printk(KERN_INFO "comedi#: vmk80xx: board #%d [%s] now detached\n",
	       dev->count, dev->board.name);
	up(&dev->limit_sem);
	mutex_unlock(&glb_mutex);
}
/* TODO: Add support for suspend, resume, pre_reset,
 * post_reset and flush */
/* USB-side driver registration: binds devices in vmk80xx_id_table. */
static struct usb_driver vmk80xx_driver = {
	.name = "vmk80xx",
	.probe = vmk80xx_probe,
	.disconnect = vmk80xx_disconnect,
	.id_table = vmk80xx_id_table
};
/* Comedi-side driver registration: attach/detach entry points. */
static struct comedi_driver driver_vmk80xx = {
	.module = THIS_MODULE,
	.driver_name = "vmk80xx",
	.attach = vmk80xx_attach,
	.detach = vmk80xx_detach
};
/*
 * Module init: register the comedi driver, then the USB driver.
 *
 * Fix: the original returned the usb_register() error while leaving
 * the comedi driver registered, so a failed module load left a stale
 * comedi driver behind.  Unregister it on that error path.
 */
static int __init vmk80xx_init(void)
{
	int retval;
	printk(KERN_INFO "vmk80xx: version 0.8.01 "
	       "Manuel Gebele <forensixs@gmx.de>\n");
	retval = comedi_driver_register(&driver_vmk80xx);
	if (retval < 0)
		return retval;
	retval = usb_register(&vmk80xx_driver);
	if (retval < 0)
		comedi_driver_unregister(&driver_vmk80xx);
	return retval;
}
/* Module exit: tear down both the comedi and USB registrations. */
static void __exit vmk80xx_exit(void)
{
	comedi_driver_unregister(&driver_vmk80xx);
	usb_deregister(&vmk80xx_driver);
}
module_init(vmk80xx_init);
module_exit(vmk80xx_exit);
| gpl-2.0 |
Renzo-Olivares/BAMF_android_kernel_htc_msm8660 | arch/um/sys-i386/elfcore.c | 11419 | 1947 | #include <linux/elf.h>
#include <linux/coredump.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/elf.h>
/* Number of extra program headers the vsyscall page contributes to a
 * core dump; 0 when no vsyscall ELF header was mapped. */
Elf32_Half elf_core_extra_phdrs(void)
{
	if (!vsyscall_ehdr)
		return 0;
	return ((struct elfhdr *)vsyscall_ehdr)->e_phnum;
}
/*
 * Write program headers describing the vsyscall page into the core
 * dump.  PT_LOAD headers get their p_offset rebased to where the data
 * will actually land in the dump; other headers are shifted by the
 * same delta.  Returns 1 on success, 0 on write failure or when the
 * dump would exceed @limit.
 */
int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
			       unsigned long limit)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *) vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		Elf32_Off ofs = 0;
		for (i = 0; i < ehdrp->e_phnum; ++i) {
			/* work on a copy; the mapped headers stay untouched */
			struct elf_phdr phdr = phdrp[i];
			if (phdr.p_type == PT_LOAD) {
				/* data for this segment is appended at @offset */
				ofs = phdr.p_offset = offset;
				offset += phdr.p_filesz;
			} else {
				phdr.p_offset += ofs;
			}
			phdr.p_paddr = 0; /* match other core phdrs */
			*size += sizeof(phdr);
			if (*size > limit
			    || !dump_write(file, &phdr, sizeof(phdr)))
				return 0;
		}
	}
	return 1;
}
/*
 * Write the contents of every PT_LOAD segment of the vsyscall page
 * into the core dump.  Returns 1 on success, 0 on write failure or
 * when the accumulated size exceeds @limit.
 */
int elf_core_write_extra_data(struct file *file, size_t *size,
			      unsigned long limit)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *) vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		for (i = 0; i < ehdrp->e_phnum; ++i) {
			if (phdrp[i].p_type == PT_LOAD) {
				/* segment is mapped in our address space */
				void *addr = (void *) phdrp[i].p_vaddr;
				size_t filesz = phdrp[i].p_filesz;
				*size += filesz;
				if (*size > limit
				    || !dump_write(file, addr, filesz))
					return 0;
			}
		}
	}
	return 1;
}
/*
 * Size of the extra dump data elf_core_write_extra_data() will emit:
 * the sum of p_filesz over all PT_LOAD headers of the vsyscall ELF
 * image (0 when no vsyscall page is present).
 *
 * Fix: the original returned the p_filesz of only the *first* PT_LOAD
 * header, while elf_core_write_extra_data() writes the data of every
 * PT_LOAD header, so the accounted and written sizes could disagree.
 * Sum all PT_LOAD sizes instead.
 */
size_t elf_core_extra_data_size(void)
{
	size_t size = 0;
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *)vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		for (i = 0; i < ehdrp->e_phnum; ++i)
			if (phdrp[i].p_type == PT_LOAD)
				size += (size_t) phdrp[i].p_filesz;
	}
	return size;
}
| gpl-2.0 |
Project-Elite/elite_kernel_grouper | arch/um/sys-i386/elfcore.c | 11419 | 1947 | #include <linux/elf.h>
#include <linux/coredump.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/elf.h>
/* Extra core-dump program headers contributed by the vsyscall page. */
Elf32_Half elf_core_extra_phdrs(void)
{
	if (!vsyscall_ehdr)
		return 0;
	return ((struct elfhdr *)vsyscall_ehdr)->e_phnum;
}
/*
 * Write program headers for the vsyscall page into the core dump,
 * rebasing each PT_LOAD p_offset to the position its data will occupy
 * in the dump file.  Returns 1 on success, 0 on failure or overflow
 * of @limit.
 */
int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
			       unsigned long limit)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *) vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		Elf32_Off ofs = 0;
		for (i = 0; i < ehdrp->e_phnum; ++i) {
			/* copy so the live mapping is never modified */
			struct elf_phdr phdr = phdrp[i];
			if (phdr.p_type == PT_LOAD) {
				ofs = phdr.p_offset = offset;
				offset += phdr.p_filesz;
			} else {
				phdr.p_offset += ofs;
			}
			phdr.p_paddr = 0; /* match other core phdrs */
			*size += sizeof(phdr);
			if (*size > limit
			    || !dump_write(file, &phdr, sizeof(phdr)))
				return 0;
		}
	}
	return 1;
}
/*
 * Dump the contents of all PT_LOAD segments of the vsyscall page.
 * Returns 1 on success, 0 on write failure or when @limit would be
 * exceeded.
 */
int elf_core_write_extra_data(struct file *file, size_t *size,
			      unsigned long limit)
{
	if ( vsyscall_ehdr ) {
		const struct elfhdr *const ehdrp =
			(struct elfhdr *) vsyscall_ehdr;
		const struct elf_phdr *const phdrp =
			(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
		int i;
		for (i = 0; i < ehdrp->e_phnum; ++i) {
			if (phdrp[i].p_type == PT_LOAD) {
				void *addr = (void *) phdrp[i].p_vaddr;
				size_t filesz = phdrp[i].p_filesz;
				*size += filesz;
				if (*size > limit
				    || !dump_write(file, addr, filesz))
					return 0;
			}
		}
	}
	return 1;
}
{
if ( vsyscall_ehdr ) {
const struct elfhdr *const ehdrp =
(struct elfhdr *)vsyscall_ehdr;
const struct elf_phdr *const phdrp =
(const struct elf_phdr *) (vsyscall_ehdr + ehdrp->e_phoff);
int i;
for (i = 0; i < ehdrp->e_phnum; ++i)
if (phdrp[i].p_type == PT_LOAD)
return (size_t) phdrp[i].p_filesz;
}
return 0;
}
| gpl-2.0 |
Fred6681/android_kernel_samsung_golden | arch/powerpc/platforms/8xx/adder875.c | 12187 | 3142 | /* Analogue & Micro Adder MPC875 board support
*
* Author: Scott Wood <scottwood@freescale.com>
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/fs_enet_pd.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cpm1.h>
#include <asm/fs_pd.h>
#include <asm/udbg.h>
#include <asm/prom.h>
#include "mpc8xx.h"
/* One CPM I/O pin assignment: port bank, pin number and mode flags. */
struct cpm_pin {
	int port, pin, flags;
};
/* Pin mux table for the Adder MPC875 board, applied by init_ioports(). */
static __initdata struct cpm_pin adder875_pins[] = {
	/* SMC1 */
	{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
	{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
	/* MII1 */
	{CPM_PORTA, 0, CPM_PIN_INPUT},
	{CPM_PORTA, 1, CPM_PIN_INPUT},
	{CPM_PORTA, 2, CPM_PIN_INPUT},
	{CPM_PORTA, 3, CPM_PIN_INPUT},
	{CPM_PORTA, 4, CPM_PIN_OUTPUT},
	{CPM_PORTA, 10, CPM_PIN_OUTPUT},
	{CPM_PORTA, 11, CPM_PIN_OUTPUT},
	{CPM_PORTB, 19, CPM_PIN_INPUT},
	{CPM_PORTB, 31, CPM_PIN_INPUT},
	{CPM_PORTC, 12, CPM_PIN_INPUT},
	{CPM_PORTC, 13, CPM_PIN_INPUT},
	{CPM_PORTE, 30, CPM_PIN_OUTPUT},
	{CPM_PORTE, 31, CPM_PIN_OUTPUT},
	/* MII2 */
	{CPM_PORTE, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 16, CPM_PIN_OUTPUT},
	{CPM_PORTE, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
	{CPM_PORTE, 21, CPM_PIN_OUTPUT},
	{CPM_PORTE, 22, CPM_PIN_OUTPUT},
	{CPM_PORTE, 23, CPM_PIN_OUTPUT},
	{CPM_PORTE, 24, CPM_PIN_OUTPUT},
	{CPM_PORTE, 25, CPM_PIN_OUTPUT},
	{CPM_PORTE, 26, CPM_PIN_OUTPUT},
	{CPM_PORTE, 27, CPM_PIN_OUTPUT},
	{CPM_PORTE, 28, CPM_PIN_OUTPUT},
	{CPM_PORTE, 29, CPM_PIN_OUTPUT},
};
/* Apply the board pin-mux table, route the SMC1 clock and put both
 * FECs into MII mode. */
static void __init init_ioports(void)
{
	const struct cpm_pin *pin = adder875_pins;
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(adder875_pins); idx++, pin++)
		cpm1_set_pin(pin->port, pin->pin, pin->flags);

	cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);

	/* Set FEC1 and FEC2 to MII mode */
	clrbits32(&mpc8xx_immr->im_cpm.cp_cptr, 0x00000180);
}
/* Board arch setup: reset the CPM, then configure the I/O ports. */
static void __init adder875_setup(void)
{
	cpm_reset();
	init_ioports();
}
/* Match this machine against the device-tree root compatible string. */
static int __init adder875_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	return of_flat_dt_is_compatible(root, "analogue-and-micro,adder875");
}
/* Buses whose children should be published as platform devices. */
static __initdata struct of_device_id of_bus_ids[] = {
	{ .compatible = "simple-bus", },
	{},
};
/* Device initcall: walk the device tree and create platform devices. */
static int __init declare_of_platform_devices(void)
{
	of_platform_bus_probe(NULL, of_bus_ids, NULL);
	return 0;
}
machine_device_initcall(adder875, declare_of_platform_devices);
/* Machine description: platform callbacks for the Adder MPC875. */
define_machine(adder875) {
	.name = "Adder MPC875",
	.probe = adder875_probe,
	.setup_arch = adder875_setup,
	.init_IRQ = mpc8xx_pics_init,
	.get_irq = mpc8xx_get_irq,
	.restart = mpc8xx_restart,
	.calibrate_decr = generic_calibrate_decr,
	.set_rtc_time = mpc8xx_set_rtc_time,
	.get_rtc_time = mpc8xx_get_rtc_time,
	.progress = udbg_progress,
};
| gpl-2.0 |
DespairFactor/N6 | lib/sort.c | 12955 | 2605 | /*
* A fast, small, non-recursive O(nlog n) sort for the Linux kernel
*
* Jan 23 2005 Matt Mackall <mpm@selenic.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/slab.h>
/* Fast swap for 4-byte elements; @size is ignored (always 4 here). */
static void u32_swap(void *a, void *b, int size)
{
	u32 tmp;

	tmp = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = tmp;
}
/* Byte-wise swap of two @size-byte regions (fallback for any size). */
static void generic_swap(void *a, void *b, int size)
{
	char *x = a;
	char *y = b;
	char tmp;

	do {
		tmp = *x;
		*x++ = *y;
		*y++ = tmp;
	} while (--size > 0);
}
/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	/* i starts at the byte offset of the last internal heap node */
	int i = (num/2 - 1) * size, n = num * size, c, r;
	if (!swap_func)
		swap_func = (size == 4 ? u32_swap : generic_swap);
	/* heapify: sift each internal node down into max-heap order */
	for ( ; i >= 0; i -= size) {
		/* r = current node, c = larger child (byte offsets) */
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
			    cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
	/* sort: repeatedly move the heap max to the end and re-sift */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
			    cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}
EXPORT_SYMBOL(sort);
#if 0
/* a simple boot-time regression test */
/* NOTE: compiled out; sorts 1000 pseudo-random ints and verifies order. */
int cmpint(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}
static int sort_test(void)
{
	int *a, i, r = 1;
	a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
	BUG_ON(!a);
	printk("testing sort()\n");
	for (i = 0; i < 1000; i++) {
		/* simple linear-congruential pseudo-random fill */
		r = (r * 725861) % 6599;
		a[i] = r;
	}
	sort(a, 1000, sizeof(int), cmpint, NULL);
	for (i = 0; i < 999; i++)
		if (a[i] > a[i+1]) {
			printk("sort() failed!\n");
			break;
		}
	kfree(a);
	return 0;
}
module_init(sort_test);
#endif
| gpl-2.0 |
Shelnutt2/android_kernel_lge_gee_3.4 | sound/isa/gus/gus_dma.c | 14491 | 7206 | /*
* Routines for GF1 DMA control
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/dma.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/gus.h>
/*
 * Acknowledge/clear the GF1 DRAM DMA control register; the read-back
 * flushes the write and clears the pending IRQ condition.
 */
static void snd_gf1_dma_ack(struct snd_gus_card * gus)
{
	unsigned long flags;
	spin_lock_irqsave(&gus->reg_lock, flags);
	snd_gf1_write8(gus, SNDRV_GF1_GB_DRAM_DMA_CONTROL, 0x00);
	snd_gf1_look8(gus, SNDRV_GF1_GB_DRAM_DMA_CONTROL);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}
/*
 * Program one DMA transfer to GUS onboard DRAM.
 * @addr:     destination address in GUS DRAM
 * @buf_addr: physical address of the host-side buffer
 * @count:    byte count
 * @cmd:      SNDRV_GF1_DMA_* command flags
 *
 * On 16-bit ISA DMA channels (dma1 > 3) the DRAM address must be
 * halved (the channel transfers words), hence the address rewriting
 * below.
 */
static void snd_gf1_dma_program(struct snd_gus_card * gus,
				unsigned int addr,
				unsigned long buf_addr,
				unsigned int count,
				unsigned int cmd)
{
	unsigned long flags;
	unsigned int address;
	unsigned char dma_cmd;
	unsigned int address_high;
	snd_printdd("dma_transfer: addr=0x%x, buf=0x%lx, count=0x%x\n",
		    addr, buf_addr, count);
	if (gus->gf1.dma1 > 3) {
		if (gus->gf1.enh_mode) {
			address = addr >> 1;
		} else {
			/* classic mode needs 32-byte alignment for 16-bit DMA */
			if (addr & 0x1f) {
				snd_printd("snd_gf1_dma_transfer: unaligned address (0x%x)?\n", addr);
				return;
			}
			address = (addr & 0x000c0000) | ((addr & 0x0003ffff) >> 1);
		}
	} else {
		address = addr;
	}
	dma_cmd = SNDRV_GF1_DMA_ENABLE | (unsigned short) cmd;
#if 0
	dma_cmd |= 0x08;
#endif
	if (dma_cmd & SNDRV_GF1_DMA_16BIT) {
		count++;
		count &= ~1;	/* align */
	}
	if (gus->gf1.dma1 > 3) {
		dma_cmd |= SNDRV_GF1_DMA_WIDTH16;
		count++;
		count &= ~1;	/* align */
	}
	/* clear any pending DMA state before starting the new transfer */
	snd_gf1_dma_ack(gus);
	snd_dma_program(gus->gf1.dma1, buf_addr, count, dma_cmd & SNDRV_GF1_DMA_READ ? DMA_MODE_READ : DMA_MODE_WRITE);
#if 0
	snd_printk(KERN_DEBUG "address = 0x%x, count = 0x%x, dma_cmd = 0x%x\n",
		   address << 1, count, dma_cmd);
#endif
	spin_lock_irqsave(&gus->reg_lock, flags);
	if (gus->gf1.enh_mode) {
		/* enhanced mode carries extra address bits in a second reg */
		address_high = ((address >> 16) & 0x000000f0) | (address & 0x0000000f);
		snd_gf1_write16(gus, SNDRV_GF1_GW_DRAM_DMA_LOW, (unsigned short) (address >> 4));
		snd_gf1_write8(gus, SNDRV_GF1_GB_DRAM_DMA_HIGH, (unsigned char) address_high);
	} else
		snd_gf1_write16(gus, SNDRV_GF1_GW_DRAM_DMA_LOW, (unsigned short) (address >> 4));
	snd_gf1_write8(gus, SNDRV_GF1_GB_DRAM_DMA_CONTROL, dma_cmd);
	spin_unlock_irqrestore(&gus->reg_lock, flags);
}
/*
 * Dequeue the next pending DMA block, preferring the PCM queue over
 * the synthesizer queue.  Caller must hold gus->dma_lock.  Returns
 * NULL when both queues are empty; otherwise also latches the block's
 * completion callback and private data into the card state.
 */
static struct snd_gf1_dma_block *snd_gf1_dma_next_block(struct snd_gus_card * gus)
{
	struct snd_gf1_dma_block *block;
	/* PCM block have bigger priority than synthesizer one */
	if (gus->gf1.dma_data_pcm) {
		block = gus->gf1.dma_data_pcm;
		if (gus->gf1.dma_data_pcm_last == block) {
			/* last element: queue becomes empty */
			gus->gf1.dma_data_pcm =
			gus->gf1.dma_data_pcm_last = NULL;
		} else {
			gus->gf1.dma_data_pcm = block->next;
		}
	} else if (gus->gf1.dma_data_synth) {
		block = gus->gf1.dma_data_synth;
		if (gus->gf1.dma_data_synth_last == block) {
			gus->gf1.dma_data_synth =
			gus->gf1.dma_data_synth_last = NULL;
		} else {
			gus->gf1.dma_data_synth = block->next;
		}
	} else {
		block = NULL;
	}
	if (block) {
		gus->gf1.dma_ack = block->ack;
		gus->gf1.dma_private_data = block->private_data;
	}
	return block;
}
/*
 * DMA-complete interrupt: acknowledge the hardware, run the finished
 * block's completion callback and start the next queued block, if any.
 *
 * Fix: the (compiled-out) debug print dereferenced @block after
 * kfree(block) — a use-after-free the moment the #if 0 is enabled.
 * Print before freeing instead.
 */
static void snd_gf1_dma_interrupt(struct snd_gus_card * gus)
{
	struct snd_gf1_dma_block *block;
	snd_gf1_dma_ack(gus);
	if (gus->gf1.dma_ack)
		gus->gf1.dma_ack(gus, gus->gf1.dma_private_data);
	spin_lock(&gus->dma_lock);
	if (gus->gf1.dma_data_pcm == NULL &&
	    gus->gf1.dma_data_synth == NULL) {
		/* nothing queued: clear trigger so the next transfer restarts DMA */
		gus->gf1.dma_ack = NULL;
		gus->gf1.dma_flags &= ~SNDRV_GF1_DMA_TRIGGER;
		spin_unlock(&gus->dma_lock);
		return;
	}
	block = snd_gf1_dma_next_block(gus);
	spin_unlock(&gus->dma_lock);
	snd_gf1_dma_program(gus, block->addr, block->buf_addr, block->count, (unsigned short) block->cmd);
#if 0
	snd_printd(KERN_DEBUG "program dma (IRQ) - "
		   "addr = 0x%x, buffer = 0x%lx, count = 0x%x, cmd = 0x%x\n",
		   block->addr, block->buf_addr, block->count, block->cmd);
#endif
	kfree(block);
}
/*
 * Acquire a reference on the shared DMA machinery.  The first caller
 * installs the write-DMA interrupt handler and resets both transfer
 * queues; later callers only bump the refcount.  Always returns 0.
 */
int snd_gf1_dma_init(struct snd_gus_card * gus)
{
	mutex_lock(&gus->dma_mutex);
	if (gus->gf1.dma_shared++ == 0) {
		gus->gf1.interrupt_handler_dma_write = snd_gf1_dma_interrupt;
		gus->gf1.dma_data_pcm = NULL;
		gus->gf1.dma_data_pcm_last = NULL;
		gus->gf1.dma_data_synth = NULL;
		gus->gf1.dma_data_synth_last = NULL;
	}
	mutex_unlock(&gus->dma_mutex);
	return 0;
}
/*
 * Drop a reference on the shared DMA machinery.  The last caller
 * disables the DMA channel, restores the default IRQ handler and
 * frees every block still queued on either list.  Always returns 0.
 */
int snd_gf1_dma_done(struct snd_gus_card * gus)
{
	struct snd_gf1_dma_block *block;
	mutex_lock(&gus->dma_mutex);
	gus->gf1.dma_shared--;
	if (!gus->gf1.dma_shared) {
		snd_dma_disable(gus->gf1.dma1);
		snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_DMA_WRITE);
		snd_gf1_dma_ack(gus);
		/* drain and free both pending-block queues */
		while ((block = gus->gf1.dma_data_pcm)) {
			gus->gf1.dma_data_pcm = block->next;
			kfree(block);
		}
		while ((block = gus->gf1.dma_data_synth)) {
			gus->gf1.dma_data_synth = block->next;
			kfree(block);
		}
		gus->gf1.dma_data_pcm_last =
		gus->gf1.dma_data_synth_last = NULL;
	}
	mutex_unlock(&gus->dma_mutex);
	return 0;
}
/*
 * Queue a DMA transfer block (copied from @__block) on the PCM or
 * synth queue and kick off DMA immediately if it is idle.
 * @atomic: use GFP_ATOMIC for the copy (caller may be in IRQ context)
 * @synth:  nonzero queues on the synthesizer list, else the PCM list
 * Returns 0 on success, -ENOMEM if the block copy cannot be allocated.
 */
int snd_gf1_dma_transfer_block(struct snd_gus_card * gus,
			       struct snd_gf1_dma_block * __block,
			       int atomic,
			       int synth)
{
	unsigned long flags;
	struct snd_gf1_dma_block *block;
	block = kmalloc(sizeof(*block), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (block == NULL) {
		snd_printk(KERN_ERR "gf1: DMA transfer failure; not enough memory\n");
		return -ENOMEM;
	}
	*block = *__block;	/* private copy; caller's struct may be stack-based */
	block->next = NULL;
	snd_printdd("addr = 0x%x, buffer = 0x%lx, count = 0x%x, cmd = 0x%x\n",
		    block->addr, (long) block->buffer, block->count,
		    block->cmd);
	snd_printdd("gus->gf1.dma_data_pcm_last = 0x%lx\n",
		    (long)gus->gf1.dma_data_pcm_last);
	snd_printdd("gus->gf1.dma_data_pcm = 0x%lx\n",
		    (long)gus->gf1.dma_data_pcm);
	spin_lock_irqsave(&gus->dma_lock, flags);
	/* append to the tail of the selected singly-linked queue */
	if (synth) {
		if (gus->gf1.dma_data_synth_last) {
			gus->gf1.dma_data_synth_last->next = block;
			gus->gf1.dma_data_synth_last = block;
		} else {
			gus->gf1.dma_data_synth =
			gus->gf1.dma_data_synth_last = block;
		}
	} else {
		if (gus->gf1.dma_data_pcm_last) {
			gus->gf1.dma_data_pcm_last->next = block;
			gus->gf1.dma_data_pcm_last = block;
		} else {
			gus->gf1.dma_data_pcm =
			gus->gf1.dma_data_pcm_last = block;
		}
	}
	/* DMA idle: start the first queued block now; further blocks are
	 * chained from the completion interrupt */
	if (!(gus->gf1.dma_flags & SNDRV_GF1_DMA_TRIGGER)) {
		gus->gf1.dma_flags |= SNDRV_GF1_DMA_TRIGGER;
		block = snd_gf1_dma_next_block(gus);
		spin_unlock_irqrestore(&gus->dma_lock, flags);
		if (block == NULL)
			return 0;
		snd_gf1_dma_program(gus, block->addr, block->buf_addr, block->count, (unsigned short) block->cmd);
		kfree(block);
		return 0;
	}
	spin_unlock_irqrestore(&gus->dma_lock, flags);
	return 0;
}
| gpl-2.0 |
craneboard/craneboard-kernel-v3 | drivers/media/video/omap3isp/ispstat.c | 156 | 31062 | /*
* ispstat.c
*
* TI OMAP3 ISP - Statistics core
*
* Copyright (C) 2010 Nokia Corporation
* Copyright (C) 2009 Texas Instruments, Inc
*
* Contacts: David Cohen <dacohen@gmail.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Sakari Ailus <sakari.ailus@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "isp.h"
#define IS_COHERENT_BUF(stat) ((stat)->dma_ch >= 0)
/*
* MAGIC_SIZE must always be the greatest common divisor of
* AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
*/
#define MAGIC_SIZE 16
#define MAGIC_NUM 0x55
/* HACK: AF module seems to be writing one more paxel data than it should. */
#define AF_EXTRA_DATA OMAP3ISP_AF_PAXEL_SIZE
/*
* HACK: H3A modules go to an invalid state after have a SBL overflow. It makes
* the next buffer to start to be written in the same point where the overflow
* occurred instead of the configured address. The only known way to make it to
* go back to a valid state is having a valid buffer processing. Of course it
* requires at least a doubled buffer size to avoid an access to invalid memory
* region. But it does not fix everything. It may happen more than one
* consecutive SBL overflows. In that case, it might be unpredictable how many
* buffers the allocated memory should fit. For that case, a recover
* configuration was created. It produces the minimum buffer size for each H3A
* module and decrease the change for more SBL overflows. This recover state
* will be enabled every time a SBL overflow occur. As the output buffer size
* isn't big, it's possible to have an extra size able to fit many recover
* buffers making it extreamily unlikely to have an access to invalid memory
* region.
*/
#define NUM_H3A_RECOVER_BUFS 10
/*
* HACK: Because of HW issues the generic layer sometimes need to have
* different behaviour for different statistic modules.
*/
#define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af)
#define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb)
#define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat))
/*
 * Sync only the two MAGIC_SIZE regions (head of the buffer and the
 * bytes just past @buf_size) between CPU and device, using the given
 * single-range sync primitive.  Used for non-coherent buffers where
 * syncing the whole buffer would be wasteful.
 */
static void __isp_stat_buf_sync_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf,
				      u32 buf_size, enum dma_data_direction dir,
				      void (*dma_sync)(struct device *,
					dma_addr_t, unsigned long, size_t,
					enum dma_data_direction))
{
	struct device *dev = stat->isp->dev;
	struct page *pg;
	dma_addr_t dma_addr;
	u32 offset;
	/* Initial magic words */
	pg = vmalloc_to_page(buf->virt_addr);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);
	/* Final magic words */
	pg = vmalloc_to_page(buf->virt_addr + buf_size);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	/* final magic may start mid-page: compute its in-page offset */
	offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
	dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
}
/* Flush the magic-word regions toward the device; coherent buffers
 * need no explicit sync. */
static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
					       struct ispstat_buffer *buf,
					       u32 buf_size,
					       enum dma_data_direction dir)
{
	if (!IS_COHERENT_BUF(stat))
		__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
					  dma_sync_single_range_for_device);
}
/* Invalidate the magic-word regions so the CPU sees fresh data;
 * coherent buffers need no explicit sync. */
static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
					    struct ispstat_buffer *buf,
					    u32 buf_size,
					    enum dma_data_direction dir)
{
	if (!IS_COHERENT_BUF(stat))
		__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
					  dma_sync_single_range_for_cpu);
}
/*
 * Validate a filled statistics buffer via the magic-word pattern:
 * the hardware must have overwritten the initial MAGIC_SIZE bytes
 * (so they may NOT all still equal MAGIC_NUM) and must NOT have run
 * past the end (the trailing magic words must be intact).
 * Returns 0 when the buffer looks valid, -EINVAL otherwise.
 *
 * Fix: corrected the "endding" typo in the debug message.
 */
static int isp_stat_buf_check_magic(struct ispstat *stat,
				    struct ispstat_buffer *buf)
{
	/* AF writes one extra paxel of data; account for it (see AF_EXTRA_DATA) */
	const u32 buf_size = IS_H3A_AF(stat) ?
			     buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
	u8 *w;
	u8 *end;
	int ret = -EINVAL;
	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
	/* Checking initial magic numbers. They shouldn't be here anymore. */
	for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
		if (likely(*w != MAGIC_NUM))
			ret = 0;
	if (ret) {
		dev_dbg(stat->isp->dev, "%s: beginning magic check does not "
					"match.\n", stat->subdev.name);
		return ret;
	}
	/* Checking magic numbers at the end. They must be still here. */
	for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
	     w < end; w++) {
		if (unlikely(*w != MAGIC_NUM)) {
			dev_dbg(stat->isp->dev, "%s: ending magic check does "
				"not match.\n", stat->subdev.name);
			return -EINVAL;
		}
	}
	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
					   DMA_FROM_DEVICE);
	return 0;
}
/*
 * Stamp MAGIC_NUM patterns at the head and just past the expected end
 * of a buffer before handing it to the hardware, so the next
 * isp_stat_buf_check_magic() can detect short or overlong writes.
 */
static void isp_stat_buf_insert_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	const u32 buf_size = IS_H3A_AF(stat) ?
			     stat->buf_size + AF_EXTRA_DATA : stat->buf_size;
	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
	/*
	 * Inserting MAGIC_NUM at the beginning and end of the buffer.
	 * buf->buf_size is set only after the buffer is queued. For now the
	 * right buf_size for the current configuration is pointed by
	 * stat->buf_size.
	 */
	memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
	memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);
	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
					   DMA_BIDIRECTIONAL);
}
/* Give the whole scatter-gather buffer back to the device; no-op for
 * coherent buffers. */
static void isp_stat_buf_sync_for_device(struct ispstat *stat,
					 struct ispstat_buffer *buf)
{
	if (!IS_COHERENT_BUF(stat))
		dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl,
				       buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}
/* Make the whole scatter-gather buffer visible to the CPU; no-op for
 * coherent buffers. */
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	if (!IS_COHERENT_BUF(stat))
		dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
				    buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}
/* Mark every statistics buffer as empty (no valid data). */
static void isp_stat_buf_clear(struct ispstat *stat)
{
	int slot;

	for (slot = 0; slot < STAT_MAX_BUFS; slot++)
		stat->buf[slot].empty = 1;
}
/*
 * Select a buffer from the pool, skipping the locked (userspace copy
 * in progress) and active (hardware writing) ones.  With @look_empty
 * set, an uninitialised buffer wins outright; otherwise empty buffers
 * are ignored and the oldest valid buffer (lowest frame number,
 * wrap-safe via signed subtraction) is returned.  May return NULL.
 */
static struct ispstat_buffer *
__isp_stat_buf_find(struct ispstat *stat, int look_empty)
{
	struct ispstat_buffer *found = NULL;
	int i;
	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *curr = &stat->buf[i];
		/*
		 * Don't select the buffer which is being copied to
		 * userspace or used by the module.
		 */
		if (curr == stat->locked_buf || curr == stat->active_buf)
			continue;
		/* Don't select uninitialised buffers if it's not required */
		if (!look_empty && curr->empty)
			continue;
		/* Pick uninitialised buffer over anything else if look_empty */
		if (curr->empty) {
			found = curr;
			break;
		}
		/* Choose the oldest buffer */
		if (!found ||
		    (s32)curr->frame_number - (s32)found->frame_number < 0)
			found = curr;
	}
	return found;
}
/* Oldest buffer holding valid data, or NULL if none. */
static inline struct ispstat_buffer *
isp_stat_buf_find_oldest(struct ispstat *stat)
{
	return __isp_stat_buf_find(stat, 0);
}
/* An empty buffer if available, otherwise the oldest valid one. */
static inline struct ispstat_buffer *
isp_stat_buf_find_oldest_or_empty(struct ispstat *stat)
{
	return __isp_stat_buf_find(stat, 1);
}
/*
 * Finalize the buffer the hardware just filled: timestamp it, record
 * its size/config/frame metadata and release it back to the pool.
 * Returns STAT_BUF_DONE on success, STAT_NO_BUF when there was no
 * active buffer or its magic check failed (data unusable).
 */
static int isp_stat_buf_queue(struct ispstat *stat)
{
	if (!stat->active_buf)
		return STAT_NO_BUF;
	do_gettimeofday(&stat->active_buf->ts);
	stat->active_buf->buf_size = stat->buf_size;
	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
			stat->subdev.name);
		return STAT_NO_BUF;
	}
	stat->active_buf->config_counter = stat->config_counter;
	stat->active_buf->frame_number = stat->frame_number;
	stat->active_buf->empty = 0;
	stat->active_buf = NULL;	/* buffer now eligible for userspace */
	return STAT_BUF_DONE;
}
/* Get next free buffer to write the statistics to and mark it active. */
static void isp_stat_buf_next(struct ispstat *stat)
{
	if (unlikely(stat->active_buf))
		/* Overwriting unused active buffer */
		/* previous buffer never got queued — data will be lost */
		dev_dbg(stat->isp->dev, "%s: new buffer requested without "
					"queuing active one.\n",
			stat->subdev.name);
	else
		stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
}
/*
 * Release the buffer currently locked for userspace access: hand its
 * cache lines back to the device and clear the locked pointer under
 * stat_lock so buffer selection may pick it again.
 */
static void isp_stat_buf_release(struct ispstat *stat)
{
	unsigned long flags;
	isp_stat_buf_sync_for_device(stat, stat->locked_buf);
	spin_lock_irqsave(&stat->isp->stat_lock, flags);
	stat->locked_buf = NULL;
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
}
/*
 * Get buffer to userspace: pick the oldest valid buffer, lock it
 * against reuse and copy its payload into @data->buf.  Buffers that
 * fail the magic check are marked empty and skipped.  Returns the
 * locked buffer on success or ERR_PTR(-EBUSY/-EINVAL/-EFAULT).
 *
 * Fix: the corrupted-data debug message had the newline before the
 * final period ("data\n."); moved it to the end (".\n").
 */
static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
					       struct omap3isp_stat_data *data)
{
	int rval = 0;
	unsigned long flags;
	struct ispstat_buffer *buf;
	spin_lock_irqsave(&stat->isp->stat_lock, flags);
	while (1) {
		buf = isp_stat_buf_find_oldest(stat);
		if (!buf) {
			spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
			dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
				stat->subdev.name);
			return ERR_PTR(-EBUSY);
		}
		if (isp_stat_buf_check_magic(stat, buf)) {
			dev_dbg(stat->isp->dev, "%s: current buffer has "
				"corrupted data.\n", stat->subdev.name);
			/* Mark empty because it doesn't have valid data. */
			buf->empty = 1;
		} else {
			/* Buffer isn't corrupted. */
			break;
		}
	}
	stat->locked_buf = buf;
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
	if (buf->buf_size > data->buf_size) {
		dev_warn(stat->isp->dev, "%s: userspace's buffer size is "
					 "not enough.\n", stat->subdev.name);
		isp_stat_buf_release(stat);
		return ERR_PTR(-EINVAL);
	}
	isp_stat_buf_sync_for_cpu(stat, buf);
	rval = copy_to_user(data->buf,
			    buf->virt_addr,
			    buf->buf_size);
	if (rval) {
		dev_info(stat->isp->dev,
			 "%s: failed copying %d bytes of stat data\n",
			 stat->subdev.name, rval);
		buf = ERR_PTR(-EFAULT);
		isp_stat_buf_release(stat);
	}
	return buf;
}
/*
 * Free every statistics buffer, handling both allocation flavours:
 * IOMMU-mapped (unmap the SG list, then free the virtual area) and
 * DMA-coherent.  All bookkeeping fields are reset so the pool can be
 * re-allocated with a different size later.
 */
static void isp_stat_bufs_free(struct ispstat *stat)
{
	struct isp_device *isp = stat->isp;
	int i;
	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];
		if (!IS_COHERENT_BUF(stat)) {
			if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
				continue;	/* never allocated */
			if (buf->iovm)
				dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
					     buf->iovm->sgt->nents,
					     DMA_FROM_DEVICE);
			omap_iommu_vfree(isp->domain, isp->dev,
							buf->iommu_addr);
		} else {
			if (!buf->virt_addr)
				continue;	/* never allocated */
			dma_free_coherent(stat->isp->dev, stat->buf_alloc_size,
					  buf->virt_addr, buf->dma_addr);
		}
		buf->iommu_addr = 0;
		buf->iovm = NULL;
		buf->dma_addr = 0;
		buf->virt_addr = NULL;
		buf->empty = 1;
	}
	dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
		stat->subdev.name);
	stat->buf_alloc_size = 0;
	stat->active_buf = NULL;
}
/*
 * Allocate all statistics buffers through the OMAP IOMMU: a virtual
 * area per buffer, mapped for DMA via its scatter-gather list.  On
 * any failure everything allocated so far is released.  Returns 0 or
 * -ENOMEM.
 */
static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
{
	struct isp_device *isp = stat->isp;
	int i;
	stat->buf_alloc_size = size;
	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];
		struct iovm_struct *iovm;
		/* buffer must not carry leftovers of the coherent flavour */
		WARN_ON(buf->dma_addr);
		buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
							size, IOMMU_FLAG);
		if (IS_ERR((void *)buf->iommu_addr)) {
			dev_err(stat->isp->dev,
				 "%s: Can't acquire memory for "
				 "buffer %d\n", stat->subdev.name, i);
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}
		iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
		if (!iovm ||
		    !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
				DMA_FROM_DEVICE)) {
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}
		buf->iovm = iovm;
		buf->virt_addr = omap_da_to_va(stat->isp->dev,
					  (u32)buf->iommu_addr);
		buf->empty = 1;
		dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
			"iommu_addr=0x%08lx virt_addr=0x%08lx",
			stat->subdev.name, i, buf->iommu_addr,
			(unsigned long)buf->virt_addr);
	}
	return 0;
}
/*
 * isp_stat_bufs_alloc_dma - Allocate all stat buffers as coherent DMA memory.
 * @size: size in bytes of each buffer.
 *
 * Allocates STAT_MAX_BUFS coherent buffers of @size bytes. On failure all
 * previously allocated buffers are released and -ENOMEM is returned.
 */
static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
{
	int i;

	stat->buf_alloc_size = size;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];

		/* Slot must not already hold an IOMMU allocation. */
		WARN_ON(buf->iommu_addr);
		buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
					&buf->dma_addr, GFP_KERNEL | GFP_DMA);

		if (!buf->virt_addr || !buf->dma_addr) {
			dev_info(stat->isp->dev,
				 "%s: Can't acquire memory for "
				 "DMA buffer %d\n", stat->subdev.name, i);
			isp_stat_bufs_free(stat);
			return -ENOMEM;
		}
		buf->empty = 1;

		dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
			"dma_addr=0x%08lx virt_addr=0x%08lx\n",
			stat->subdev.name, i, (unsigned long)buf->dma_addr,
			(unsigned long)buf->virt_addr);
	}

	return 0;
}
/*
 * isp_stat_bufs_alloc - (Re)allocate statistics buffers of at least @size.
 *
 * Reuses the current buffers if they are already large enough. Refuses to
 * reallocate while the engine is enabled or a buffer is being processed
 * (-EBUSY). Otherwise frees the old buffers and delegates to the DMA or
 * IOMMU allocator depending on the module's buffer type.
 */
static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{
	unsigned long flags;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	/* A buffer locked by userspace must never be freed underneath it. */
	BUG_ON(stat->locked_buf != NULL);

	/* Are the old buffers big enough? */
	if (stat->buf_alloc_size >= size) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return 0;
	}

	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
		dev_info(stat->isp->dev,
			 "%s: trying to allocate memory when busy\n",
			 stat->subdev.name);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return -EBUSY;
	}

	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

	isp_stat_bufs_free(stat);

	if (IS_COHERENT_BUF(stat))
		return isp_stat_bufs_alloc_dma(stat, size);
	else
		return isp_stat_bufs_alloc_iommu(stat, size);
}
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
struct video_device *vdev = stat->subdev.devnode;
struct v4l2_event event;
struct omap3isp_stat_event_status *status = (void *)event.u.data;
memset(&event, 0, sizeof(event));
if (!err) {
status->frame_number = stat->frame_number;
status->config_counter = stat->config_counter;
} else {
status->buf_err = 1;
}
event.type = stat->event_type;
v4l2_event_queue(vdev, &event);
}
/*
 * omap3isp_stat_request_statistics - Request statistics.
 * @data: Pointer to return statistics data.
 *
 * Copies the metadata (timestamp, config counter, frame number, size) of
 * the next ready buffer into @data and marks that buffer empty again.
 * Serialized against configuration via stat->ioctl_lock.
 *
 * Returns 0 if successful, -EINVAL if the engine is not enabled, or the
 * error returned by isp_stat_buf_get().
 */
int omap3isp_stat_request_statistics(struct ispstat *stat,
				     struct omap3isp_stat_data *data)
{
	struct ispstat_buffer *buf;

	if (stat->state != ISPSTAT_ENABLED) {
		dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
			stat->subdev.name);
		return -EINVAL;
	}

	mutex_lock(&stat->ioctl_lock);
	buf = isp_stat_buf_get(stat, data);
	if (IS_ERR(buf)) {
		mutex_unlock(&stat->ioctl_lock);
		return PTR_ERR(buf);
	}

	data->ts = buf->ts;
	data->config_counter = buf->config_counter;
	data->frame_number = buf->frame_number;
	data->buf_size = buf->buf_size;

	/* Buffer content has been consumed; allow it to be reused. */
	buf->empty = 1;
	isp_stat_buf_release(stat);
	mutex_unlock(&stat->ioctl_lock);

	return 0;
}
/*
 * omap3isp_stat_config - Receives new statistic engine configuration.
 * @new_conf: Pointer to config structure.
 *
 * Validates the user configuration, computes the real buffer size needed
 * (applying the H3A/histogram workarounds documented below), allocates the
 * buffers and applies the parameters under the stat spinlock.
 *
 * Returns 0 if successful, -EINVAL if new_conf pointer is NULL, -ENOMEM if
 * was unable to allocate memory for the buffer, or other errors if parameters
 * are invalid.
 */
int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
{
	int ret;
	unsigned long irqflags;
	struct ispstat_generic_config *user_cfg = new_conf;
	u32 buf_size;

	if (!new_conf) {
		dev_dbg(stat->isp->dev, "%s: configuration is NULL\n",
			stat->subdev.name);
		return -EINVAL;
	}

	/*
	 * Read the requested size only after the NULL check above; the
	 * original code dereferenced user_cfg before validating new_conf.
	 */
	buf_size = user_cfg->buf_size;

	mutex_lock(&stat->ioctl_lock);

	dev_dbg(stat->isp->dev, "%s: configuring module with buffer "
		"size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size);

	ret = stat->ops->validate_params(stat, new_conf);
	if (ret) {
		mutex_unlock(&stat->ioctl_lock);
		dev_dbg(stat->isp->dev, "%s: configuration values are "
			"invalid.\n", stat->subdev.name);
		return ret;
	}

	/* validate_params() may shrink/round the requested buffer size. */
	if (buf_size != user_cfg->buf_size)
		dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size "
			"request to 0x%08lx\n", stat->subdev.name,
			(unsigned long)user_cfg->buf_size);

	/*
	 * Hack: H3A modules may need a doubled buffer size to avoid access
	 * to a invalid memory address after a SBL overflow.
	 * The buffer size is always PAGE_ALIGNED.
	 * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be
	 * inserted at the end to data integrity check purpose.
	 * Hack 3: AF module writes one paxel data more than it should, so
	 * the buffer allocation must consider it to avoid invalid memory
	 * access.
	 * Hack 4: H3A need to allocate extra space for the recover state.
	 */
	if (IS_H3A(stat)) {
		buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
		if (IS_H3A_AF(stat))
			/*
			 * Adding one extra paxel data size for each recover
			 * buffer + 2 regular ones.
			 */
			buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
		if (stat->recover_priv) {
			struct ispstat_generic_config *recover_cfg =
				stat->recover_priv;
			buf_size += recover_cfg->buf_size *
				    NUM_H3A_RECOVER_BUFS;
		}
		buf_size = PAGE_ALIGN(buf_size);
	} else { /* Histogram */
		buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
	}

	ret = isp_stat_bufs_alloc(stat, buf_size);
	if (ret) {
		mutex_unlock(&stat->ioctl_lock);
		return ret;
	}

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	stat->ops->set_params(stat, new_conf);
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);

	/*
	 * Returning the right future config_counter for this setup, so
	 * userspace can *know* when it has been applied.
	 */
	user_cfg->config_counter = stat->config_counter + stat->inc_config;

	/* Module has a valid configuration. */
	stat->configured = 1;
	dev_dbg(stat->isp->dev, "%s: module has been successfully "
		"configured.\n", stat->subdev.name);

	mutex_unlock(&stat->ioctl_lock);

	return 0;
}
/*
 * isp_stat_buf_process - Process statistic buffers.
 * @buf_state: points out if buffer is ready to be processed. It's necessary
 *	       because histogram needs to copy the data from internal memory
 *	       before be able to process the buffer.
 *
 * Queues the active buffer and rotates to the next one, unless this buffer
 * must be ignored (buf_err countdown after an SBL overflow), the buffer is
 * not done, or the engine is no longer enabled.
 *
 * Returns the result of isp_stat_buf_queue(), or STAT_NO_BUF when the
 * buffer was skipped.
 */
static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
{
	int ret = STAT_NO_BUF;

	/* atomic_add_unless() consumes one "ignore this buffer" credit. */
	if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
	    buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
		ret = isp_stat_buf_queue(stat);
		isp_stat_buf_next(stat);
	}

	return ret;
}
/* Return nonzero while the hardware engine itself is busy. */
int omap3isp_stat_pcr_busy(struct ispstat *stat)
{
	return stat->ops->busy(stat);
}
/*
 * Return nonzero if the module is busy in any sense: hardware active, a
 * buffer being processed, or the state machine not fully disabled.
 */
int omap3isp_stat_busy(struct ispstat *stat)
{
	return omap3isp_stat_pcr_busy(stat) | stat->buf_processing |
		(stat->state != ISPSTAT_DISABLED);
}
/*
 * isp_stat_pcr_enable - Disables/Enables statistic engines.
 * @pcr_enable: 0/1 - Disables/Enables the engine.
 *
 * Must be called from ISP driver when the module is idle and synchronized
 * with CCDC.
 *
 * Also advances the state machine: ENABLING -> ENABLED on enable,
 * DISABLING -> DISABLED on disable.
 */
static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
{
	if ((stat->state != ISPSTAT_ENABLING &&
	     stat->state != ISPSTAT_ENABLED) && pcr_enable)
		/* Userspace has disabled the module. Aborting. */
		return;

	stat->ops->enable(stat, pcr_enable);
	if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
		stat->state = ISPSTAT_DISABLED;
	else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
		stat->state = ISPSTAT_ENABLED;
}
/*
 * omap3isp_stat_suspend - Stop the hardware on suspend.
 *
 * Disables the engine if active and remembers an ENABLED state as
 * SUSPENDED so omap3isp_stat_resume() can re-arm it later.
 */
void omap3isp_stat_suspend(struct ispstat *stat)
{
	unsigned long flags;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	if (stat->state != ISPSTAT_DISABLED)
		stat->ops->enable(stat, 0);
	if (stat->state == ISPSTAT_ENABLED)
		stat->state = ISPSTAT_SUSPENDED;

	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
}
/*
 * omap3isp_stat_resume - Mark a suspended module for re-enabling.
 *
 * Only flips SUSPENDED back to ENABLING; the actual hardware enable
 * happens later from isp_stat_try_enable() when the pipeline restarts.
 */
void omap3isp_stat_resume(struct ispstat *stat)
{
	/* Module will be re-enabled with its pipeline */
	if (stat->state == ISPSTAT_SUSPENDED)
		stat->state = ISPSTAT_ENABLING;
}
/*
 * isp_stat_try_enable - Enable the engine if userspace has requested it.
 *
 * Performs the deferred hardware enable when the module is in ENABLING
 * state, no buffer is being processed and buffers have been allocated:
 * picks the next buffer, programs the registers, writes the integrity
 * magic and finally sets the PCR enable bit. No-op otherwise.
 */
static void isp_stat_try_enable(struct ispstat *stat)
{
	unsigned long irqflags;

	if (stat->priv == NULL)
		/* driver wasn't initialised */
		return;

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
	    stat->buf_alloc_size) {
		/*
		 * Userspace's requested to enable the engine but it wasn't yet.
		 * Let's do that now.
		 */
		stat->update = 1;
		isp_stat_buf_next(stat);
		stat->ops->setup_regs(stat, stat->priv);
		isp_stat_buf_insert_magic(stat, stat->active_buf);

		/*
		 * H3A module has some hw issues which forces the driver to
		 * ignore next buffers even if it was disabled in the meantime.
		 * On the other hand, Histogram shouldn't ignore buffers anymore
		 * if it's being enabled.
		 */
		if (!IS_H3A(stat))
			atomic_set(&stat->buf_err, 0);

		isp_stat_pcr_enable(stat, 1);
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
			stat->subdev.name);
	} else {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	}
}
/* Frame-sync interrupt: opportunity to perform a pending deferred enable. */
void omap3isp_stat_isr_frame_sync(struct ispstat *stat)
{
	isp_stat_try_enable(stat);
}
/*
 * omap3isp_stat_sbl_overflow - Handle a shared buffer logic overflow.
 *
 * Arms the buf_err counter so the next buffers are discarded and, when a
 * recover configuration exists, requests the soft-recovery setup used by
 * the ISR to avoid back-to-back overflows.
 */
void omap3isp_stat_sbl_overflow(struct ispstat *stat)
{
	unsigned long irqflags;

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	/*
	 * Due to a H3A hw issue which prevents the next buffer to start from
	 * the correct memory address, 2 buffers must be ignored.
	 */
	atomic_set(&stat->buf_err, 2);

	/*
	 * If more than one SBL overflow happen in a row, H3A module may access
	 * invalid memory region.
	 * stat->sbl_ovl_recover is set to tell to the driver to temporarily use
	 * a soft configuration which helps to avoid consecutive overflows.
	 */
	if (stat->recover_priv)
		stat->sbl_ovl_recover = 1;
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
}
/*
 * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
 * @enable: 0/1 - Disables/Enables the engine.
 *
 * Client should configure all the module registers before this.
 * This function can be called from a userspace request.
 *
 * Only updates the state machine (ENABLING/DISABLING requests); the actual
 * hardware toggle happens later in isp_stat_try_enable()/the ISR. Returns
 * -EINVAL when enabling a never-configured module, 0 otherwise.
 */
int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
{
	unsigned long irqflags;

	dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
		stat->subdev.name, enable ? "enable" : "disable");

	/* Prevent enabling while configuring */
	mutex_lock(&stat->ioctl_lock);

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);

	if (!stat->configured && enable) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		mutex_unlock(&stat->ioctl_lock);
		dev_dbg(stat->isp->dev, "%s: cannot enable module as it's "
			"never been successfully configured so far.\n",
			stat->subdev.name);
		return -EINVAL;
	}

	if (enable) {
		if (stat->state == ISPSTAT_DISABLING)
			/* Previous disabling request wasn't done yet */
			stat->state = ISPSTAT_ENABLED;
		else if (stat->state == ISPSTAT_DISABLED)
			/* Module is now being enabled */
			stat->state = ISPSTAT_ENABLING;
	} else {
		if (stat->state == ISPSTAT_ENABLING) {
			/* Previous enabling request wasn't done yet */
			stat->state = ISPSTAT_DISABLED;
		} else if (stat->state == ISPSTAT_ENABLED) {
			/* Module is now being disabled */
			stat->state = ISPSTAT_DISABLING;
			isp_stat_buf_clear(stat);
		}
	}

	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	mutex_unlock(&stat->ioctl_lock);

	return 0;
}
/*
 * omap3isp_stat_s_stream - V4L2 subdev stream on/off handler.
 *
 * On stream-on, enables the hardware only if userspace previously requested
 * it. On stream-off, disables the engine and, if the hardware is already
 * idle, runs the ISR once so the state machine reaches DISABLED.
 */
int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct ispstat *stat = v4l2_get_subdevdata(subdev);

	if (enable) {
		/*
		 * Only set enable PCR bit if the module was previously
		 * enabled through ioct.
		 */
		isp_stat_try_enable(stat);
	} else {
		unsigned long flags;
		/* Disable PCR bit and config enable field */
		omap3isp_stat_enable(stat, 0);
		spin_lock_irqsave(&stat->isp->stat_lock, flags);
		stat->ops->enable(stat, 0);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

		/*
		 * If module isn't busy, a new interrupt may come or not to
		 * set the state to DISABLED. As Histogram needs to read its
		 * internal memory to clear it, let interrupt handler
		 * responsible of changing state to DISABLED. If the last
		 * interrupt is coming, it's still safe as the handler will
		 * ignore the second time when state is already set to DISABLED.
		 * It's necessary to synchronize Histogram with streamoff, once
		 * the module may be considered idle before last SDMA transfer
		 * starts if we return here.
		 */
		if (!omap3isp_stat_pcr_busy(stat))
			omap3isp_stat_isr(stat);

		dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
			stat->subdev.name);
	}

	return 0;
}
/*
 * __stat_isr - Interrupt handler for statistic drivers
 * @from_dma: nonzero when invoked from the SDMA completion path rather than
 *	      the hardware interrupt.
 *
 * Disables the engine, processes the finished buffer (possibly deferring to
 * an SDMA transfer), reprograms the registers (normal or recover config),
 * re-enables the engine and queues a V4L2 event reporting success/failure.
 */
static void __stat_isr(struct ispstat *stat, int from_dma)
{
	int ret = STAT_BUF_DONE;
	int buf_processing;
	unsigned long irqflags;
	struct isp_pipeline *pipe;

	/*
	 * stat->buf_processing must be set before disable module. It's
	 * necessary to not inform too early the buffers aren't busy in case
	 * of SDMA is going to be used.
	 */
	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (stat->state == ISPSTAT_DISABLED) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		return;
	}
	buf_processing = stat->buf_processing;
	stat->buf_processing = 1;
	stat->ops->enable(stat, 0);

	if (buf_processing && !from_dma) {
		if (stat->state == ISPSTAT_ENABLED) {
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			dev_err(stat->isp->dev,
				"%s: interrupt occurred when module was still "
				"processing a buffer.\n", stat->subdev.name);
			ret = STAT_NO_BUF;
			goto out;
		} else {
			/*
			 * Interrupt handler was called from streamoff when
			 * the module wasn't busy anymore to ensure it is being
			 * disabled after process last buffer. If such buffer
			 * processing has already started, no need to do
			 * anything else.
			 */
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			return;
		}
	}
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);

	/* If it's busy we can't process this buffer anymore */
	if (!omap3isp_stat_pcr_busy(stat)) {
		if (!from_dma && stat->ops->buf_process)
			/* Module still need to copy data to buffer. */
			ret = stat->ops->buf_process(stat);
		if (ret == STAT_BUF_WAITING_DMA)
			/* Buffer is not ready yet */
			return;

		spin_lock_irqsave(&stat->isp->stat_lock, irqflags);

		/*
		 * Histogram needs to read its internal memory to clear it
		 * before be disabled. For that reason, common statistic layer
		 * can return only after call stat's buf_process() operator.
		 */
		if (stat->state == ISPSTAT_DISABLING) {
			stat->state = ISPSTAT_DISABLED;
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			stat->buf_processing = 0;
			return;
		}
		pipe = to_isp_pipeline(&stat->subdev.entity);
		stat->frame_number = atomic_read(&pipe->frame_number);

		/*
		 * Before this point, 'ret' stores the buffer's status if it's
		 * ready to be processed. Afterwards, it holds the status if
		 * it was processed successfully.
		 */
		ret = isp_stat_buf_process(stat, ret);

		if (likely(!stat->sbl_ovl_recover)) {
			stat->ops->setup_regs(stat, stat->priv);
		} else {
			/*
			 * Using recover config to increase the chance to have
			 * a good buffer processing and make the H3A module to
			 * go back to a valid state.
			 */
			stat->update = 1;
			stat->ops->setup_regs(stat, stat->recover_priv);
			stat->sbl_ovl_recover = 0;

			/*
			 * Set 'update' in case of the module needs to use
			 * regular configuration after next buffer.
			 */
			stat->update = 1;
		}

		isp_stat_buf_insert_magic(stat, stat->active_buf);

		/*
		 * Hack: H3A modules may access invalid memory address or send
		 * corrupted data to userspace if more than 1 SBL overflow
		 * happens in a row without re-writing its buffer's start memory
		 * address in the meantime. Such situation is avoided if the
		 * module is not immediately re-enabled when the ISR misses the
		 * timing to process the buffer and to setup the registers.
		 * Because of that, pcr_enable(1) was moved to inside this 'if'
		 * block. But the next interruption will still happen as during
		 * pcr_enable(0) the module was busy.
		 */
		isp_stat_pcr_enable(stat, 1);
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	} else {
		/*
		 * If a SBL overflow occurs and the H3A driver misses the timing
		 * to process the buffer, stat->buf_err is set and won't be
		 * cleared now. So the next buffer will be correctly ignored.
		 * It's necessary due to a hw issue which makes the next H3A
		 * buffer to start from the memory address where the previous
		 * one stopped, instead of start where it was configured to.
		 * Do not "stat->buf_err = 0" here.
		 */

		if (stat->ops->buf_process)
			/*
			 * Driver may need to erase current data prior to
			 * process a new buffer. If it misses the timing, the
			 * next buffer might be wrong. So should be ignored.
			 * It happens only for Histogram.
			 */
			atomic_set(&stat->buf_err, 1);

		ret = STAT_NO_BUF;
		dev_dbg(stat->isp->dev, "%s: cannot process buffer, "
			"device is busy.\n", stat->subdev.name);
	}

out:
	stat->buf_processing = 0;
	isp_stat_queue_event(stat, ret != STAT_BUF_DONE);
}
/* Hardware interrupt entry point. */
void omap3isp_stat_isr(struct ispstat *stat)
{
	__stat_isr(stat, 0);
}
/* SDMA completion entry point (buffer data copy finished). */
void omap3isp_stat_dma_isr(struct ispstat *stat)
{
	__stat_isr(stat, 1);
}
/*
 * Subscribe to this module's statistics-ready event; only the module's own
 * event type is accepted.
 */
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
				  struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	struct ispstat *stat = v4l2_get_subdevdata(subdev);

	if (sub->type != stat->event_type)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
}
/* Unsubscribe from the statistics event. */
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
				    struct v4l2_fh *fh,
				    struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
/* Unregister the statistics subdev from the V4L2 device. */
void omap3isp_stat_unregister_entities(struct ispstat *stat)
{
	v4l2_device_unregister_subdev(&stat->subdev);
}
/* Register the statistics subdev with the V4L2 device. */
int omap3isp_stat_register_entities(struct ispstat *stat,
				    struct v4l2_device *vdev)
{
	return v4l2_device_register_subdev(vdev, &stat->subdev);
}
/*
 * isp_stat_init_entities - Initialize the subdev and its media entity.
 * @name: module name appended to "OMAP3 ISP " for the subdev name.
 *
 * Sets up the subdev (device node + events capable) and registers a single
 * sink pad. Returns the media_entity_init() result.
 */
static int isp_stat_init_entities(struct ispstat *stat, const char *name,
				  const struct v4l2_subdev_ops *sd_ops)
{
	struct v4l2_subdev *subdev = &stat->subdev;
	struct media_entity *me = &subdev->entity;

	v4l2_subdev_init(subdev, sd_ops);
	snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
	subdev->grp_id = 1 << 16;	/* group ID for isp subdevs */
	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
	v4l2_set_subdevdata(subdev, stat);

	stat->pad.flags = MEDIA_PAD_FL_SINK;
	me->ops = NULL;

	return media_entity_init(me, 1, &stat->pad, 0);
}
/*
 * omap3isp_stat_init - Allocate buffer descriptors and init the subdev.
 *
 * Returns 0 on success or a negative errno. On entity-init failure the
 * ioctl mutex and the descriptor array are released again.
 */
int omap3isp_stat_init(struct ispstat *stat, const char *name,
		       const struct v4l2_subdev_ops *sd_ops)
{
	int ret;

	stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
	if (!stat->buf)
		return -ENOMEM;

	isp_stat_buf_clear(stat);
	mutex_init(&stat->ioctl_lock);
	atomic_set(&stat->buf_err, 0);

	ret = isp_stat_init_entities(stat, name, sd_ops);
	if (ret < 0) {
		mutex_destroy(&stat->ioctl_lock);
		kfree(stat->buf);
	}

	return ret;
}
/* Tear down the media entity, lock, buffers and descriptor array. */
void omap3isp_stat_cleanup(struct ispstat *stat)
{
	media_entity_cleanup(&stat->subdev.entity);
	mutex_destroy(&stat->ioctl_lock);
	isp_stat_bufs_free(stat);
	kfree(stat->buf);
}
| gpl-2.0 |
Dees-Troy/android_kernel_samsung_coreprimelte | drivers/video/msm/mdss/mdss_hdmi_hdcp.c | 668 | 43951 | /* Copyright (c) 2010-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/io.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <soc/qcom/scm.h>
#include "mdss_hdmi_hdcp.h"
#include "video/msm_hdmi_hdcp_mgr.h"
/* Printable name of the current HDCP state (expects hdcp_ctrl in scope). */
#define HDCP_STATE_NAME (hdcp_state_name(hdcp_ctrl->hdcp_state))

/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */
#define HDCP_KEYS_STATE_NO_KEYS		0
#define HDCP_KEYS_STATE_NOT_CHECKED	1
#define HDCP_KEYS_STATE_CHECKING	2
#define HDCP_KEYS_STATE_VALID		3
#define HDCP_KEYS_STATE_AKSV_NOT_VALID	4
#define HDCP_KEYS_STATE_CHKSUM_MISMATCH	5
#define HDCP_KEYS_STATE_PROD_AKSV	6
#define HDCP_KEYS_STATE_RESERVED	7

/* TrustZone command id for HDCP register writes (use not shown in chunk) */
#define TZ_HDCP_CMD_ID 0x00004401

#define HDCP_REG_ENABLE 0x01
#define HDCP_REG_DISABLE 0x00

/* Mask of HDCP interrupt ack/clear bits */
#define HDCP_INT_CLR (BIT(1) | BIT(5) | BIT(7) | BIT(9) | BIT(13))

/* One HDCP register transfer: register id, DDC offset, log name, value */
struct hdmi_hdcp_reg_data {
	u32 reg_id;
	u32 off;
	char *name;
	u32 reg_val;
};

/* Per-link HDCP 1.x driver state */
struct hdmi_hdcp_ctrl {
	u32 auth_retries;	/* NOTE(review): retry counter — usage not
				 * visible in this chunk */
	u32 tp_msgid;		/* topology message id */
	u32 tz_hdcp;		/* nonzero: HDCP regs programmed via TZ/SCM */
	enum hdmi_hdcp_state hdcp_state;
	struct HDCP_V2V1_MSG_TOPOLOGY cached_tp;
	struct HDCP_V2V1_MSG_TOPOLOGY current_tp;
	struct delayed_work hdcp_auth_work;	/* deferred authentication */
	struct work_struct hdcp_int_work;	/* HDCP irq bottom half */
	struct completion r0_checked;	/* HW finished comparing R0/R0' */
	struct hdmi_hdcp_init_data init_data;
};
/* Map an hdmi_hdcp_state value to a printable name for logging. */
const char *hdcp_state_name(enum hdmi_hdcp_state hdcp_state)
{
	switch (hdcp_state) {
	case HDCP_STATE_INACTIVE:	return "HDCP_STATE_INACTIVE";
	case HDCP_STATE_AUTHENTICATING:	return "HDCP_STATE_AUTHENTICATING";
	case HDCP_STATE_AUTHENTICATED:	return "HDCP_STATE_AUTHENTICATED";
	case HDCP_STATE_AUTH_FAIL:	return "HDCP_STATE_AUTH_FAIL";
	default:			return "???";
	}
} /* hdcp_state_name */
/*
 * hdmi_hdcp_count_one - Count the set bits across a byte array.
 * @array: bytes to scan.
 * @len: number of bytes in @array.
 *
 * Used to validate KSVs, which must contain exactly twenty 1s.
 * Returns the total number of 1 bits.
 */
static int hdmi_hdcp_count_one(u8 *array, u8 len)
{
	int ones = 0;
	u8 idx;

	for (idx = 0; idx < len; idx++) {
		u8 byte = array[idx];

		/* Strip one set bit per iteration until the byte is empty. */
		while (byte) {
			ones += byte & 0x1;
			byte >>= 1;
		}
	}

	return ones;
} /* hdmi_hdcp_count_one */
/*
 * reset_hdcp_ddc_failures - Clear stale HDCP DDC failure/NACK conditions.
 *
 * Reads HDMI_HDCP_DDC_STATUS and, if the previous hardware DDC transfer
 * failed, acknowledges the failure (temporarily disabling HDCP DDC). If a
 * NACK was recorded, resets the DDC software status and the DDC controller.
 * Called before starting a new authentication attempt.
 */
static void reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	int hdcp_ddc_ctrl1_reg;
	int hdcp_ddc_status;
	int failure;
	int nack0;
	struct dss_io_data *io;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		return;
	}

	io = hdcp_ctrl->init_data.core_io;

	/* Check for any DDC transfer failures */
	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
	failure = (hdcp_ddc_status >> 16) & 0x1;	/* FAILED bit */
	nack0 = (hdcp_ddc_status >> 14) & 0x1;		/* NACK0 bit */
	DEV_DBG("%s: %s: On Entry: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
		__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);

	if (failure == 0x1) {
		/*
		 * Indicates that the last HDCP HW DDC transfer failed.
		 * This occurs when a transfer is attempted with HDCP DDC
		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
		 * matches HDCP_DDC_RETRY_CNT.
		 * Failure occured, let's clear it.
		 */
		DEV_DBG("%s: %s: DDC failure detected.HDCP_DDC_STATUS=0x%08x\n",
			 __func__, HDCP_STATE_NAME, hdcp_ddc_status);

		/* First, Disable DDC */
		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, BIT(0));

		/* ACK the Failure to Clear it */
		hdcp_ddc_ctrl1_reg = DSS_REG_R(io, HDMI_HDCP_DDC_CTRL_1);
		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_1,
			hdcp_ddc_ctrl1_reg | BIT(0));

		/* Check if the FAILURE got Cleared */
		hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
		hdcp_ddc_status = (hdcp_ddc_status >> 16) & BIT(0);
		if (hdcp_ddc_status == 0x0)
			DEV_DBG("%s: %s: HDCP DDC Failure cleared\n", __func__,
				HDCP_STATE_NAME);
		else
			DEV_WARN("%s: %s: Unable to clear HDCP DDC Failure",
				__func__, HDCP_STATE_NAME);

		/* Re-Enable HDCP DDC */
		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, 0);
	}

	if (nack0 == 0x1) {
		DEV_DBG("%s: %s: Before: HDMI_DDC_SW_STATUS=0x%08x\n", __func__,
			HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
		/* Reset HDMI DDC software status */
		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(3));
		msleep(20);
		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
			DSS_REG_R(io, HDMI_DDC_CTRL) & ~(BIT(3)));

		/* Reset HDMI DDC Controller */
		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(1));
		msleep(20);
		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
			DSS_REG_R(io, HDMI_DDC_CTRL) & ~BIT(1));
		DEV_DBG("%s: %s: After: HDMI_DDC_SW_STATUS=0x%08x\n", __func__,
			HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
	}

	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);

	failure = (hdcp_ddc_status >> 16) & BIT(0);
	nack0 = (hdcp_ddc_status >> 14) & BIT(0);
	DEV_DBG("%s: %s: On Exit: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
		__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
} /* reset_hdcp_ddc_failures */
/*
 * hdmi_hdcp_hw_ddc_clean - Wait for the hardware DDC engine to go idle.
 *
 * Polls HDMI_HDCP_DDC_STATUS/HDMI_DDC_HW_STATUS (up to 100 * 20ms) until
 * the transfer-done bit is set, no transfer is requested and the HW engine
 * reports done. Gives up silently on timeout.
 */
static void hdmi_hdcp_hw_ddc_clean(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	struct dss_io_data *io = NULL;
	u32 hdcp_ddc_status, ddc_hw_status;
	u32 ddc_xfer_done, ddc_xfer_req, ddc_hw_done;
	u32 ddc_hw_not_ready;
	u32 timeout_count;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		return;
	}

	io = hdcp_ctrl->init_data.core_io;
	if (!io->base) {
		DEV_ERR("%s: core io not inititalized\n", __func__);
		return;
	}

	if (DSS_REG_R(io, HDMI_DDC_HW_STATUS) != 0) {
		/* Wait to be clean on DDC HW engine */
		timeout_count = 100;
		do {
			hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
			ddc_hw_status = DSS_REG_R(io, HDMI_DDC_HW_STATUS);

			ddc_xfer_done = hdcp_ddc_status & BIT(10);
			ddc_xfer_req = hdcp_ddc_status & BIT(4);
			ddc_hw_done = ddc_hw_status & BIT(3);
			ddc_hw_not_ready = !ddc_xfer_done ||
				ddc_xfer_req || !ddc_hw_done;

			DEV_DBG("%s: %s: timeout count(%d):ddc hw%sready\n",
				__func__, HDCP_STATE_NAME, timeout_count,
					ddc_hw_not_ready ? " not " : " ");
			DEV_DBG("hdcp_ddc_status[0x%x], ddc_hw_status[0x%x]\n",
					hdcp_ddc_status, ddc_hw_status);
			if (ddc_hw_not_ready)
				msleep(20);
			} while (ddc_hw_not_ready && --timeout_count);
	}
} /* hdmi_hdcp_hw_ddc_clean */
/*
 * hdcp_scm_call - Program HDCP registers through the secure world.
 * @req: array of SCM_HDCP_MAX_REG address/value pairs to write.
 * @resp: written with the secure-world response code.
 *
 * Uses the legacy scm_call() buffer interface on pre-ARMv8 TZ and
 * scm_call2() with a flattened argument list otherwise. Returns 0 on
 * success or the scm call's error code.
 */
static int hdcp_scm_call(struct scm_hdcp_req *req, u32 *resp)
{
	int ret = 0;

	if (!is_scm_armv8()) {
		/*
		 * Pass @resp itself as the response buffer. The previous
		 * code passed &resp (a u32 **), which made TZ write over
		 * the local pointer variable and left the caller's
		 * response word untouched.
		 */
		ret = scm_call(SCM_SVC_HDCP, SCM_CMD_HDCP, (void *) req,
			SCM_HDCP_MAX_REG * sizeof(struct scm_hdcp_req),
			resp, sizeof(*resp));
	} else {
		struct scm_desc desc;

		/* Flatten the five address/value pairs into scm args. */
		desc.args[0] = req[0].addr;
		desc.args[1] = req[0].val;
		desc.args[2] = req[1].addr;
		desc.args[3] = req[1].val;
		desc.args[4] = req[2].addr;
		desc.args[5] = req[2].val;
		desc.args[6] = req[3].addr;
		desc.args[7] = req[3].val;
		desc.args[8] = req[4].addr;
		desc.args[9] = req[4].val;
		desc.arginfo = SCM_ARGS(10);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_HDCP, SCM_CMD_HDCP),
				&desc);
		*resp = desc.ret[0];
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * hdmi_hdcp_authentication_part1 - First part of HDCP 1.x authentication.
 * @hdcp_ctrl: HDCP context; must be in HDCP_STATE_AUTHENTICATING.
 *
 * Reads AKSV from the QFPROM fuses, enables the HDCP cipher, exchanges
 * An/AKSV/BKSV with the sink over DDC, waits for the key state machine to
 * validate the keys, and compares R0/R0'. On success HDCP encryption is
 * enabled. Returns 0 on success or a negative errno.
 *
 * The only functional change versus the previous revision is the
 * "Keys not ready" DEV_DBG string, whose format was malformed
 * ("...s=%d\n, l0=%0x08x" printed a stray "08x" and dropped the 0x
 * prefix of link0_status).
 */
static int hdmi_hdcp_authentication_part1(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	int rc;
	u32 qfprom_aksv_lsb, qfprom_aksv_msb;
	u32 link0_aksv_0, link0_aksv_1;
	u32 link0_bksv_0, link0_bksv_1;
	u32 link0_an_0, link0_an_1;
	u32 timeout_count;
	bool is_match;
	bool stale_an = false;
	struct dss_io_data *io;
	u8 aksv[5], *bksv = NULL;
	u8 an[8];
	u8 bcaps;
	struct hdmi_tx_ddc_data ddc_data;
	u32 link0_status, an_ready, keys_state;
	u8 buf[0xFF];
	struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
	u32 phy_addr;
	u32 ret = 0;
	u32 resp = 0;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io ||
		!hdcp_ctrl->init_data.qfprom_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		rc = -EINVAL;
		goto error;
	}

	phy_addr = hdcp_ctrl->init_data.phy_addr;

	if (HDCP_STATE_AUTHENTICATING != hdcp_ctrl->hdcp_state) {
		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
			HDCP_STATE_NAME);
		rc = -EINVAL;
		goto error;
	}

	bksv = hdcp_ctrl->current_tp.bksv;
	io = hdcp_ctrl->init_data.core_io;

	/* Fetch aksv from QFPROM, this info should be public. */
	qfprom_aksv_lsb = DSS_REG_R(hdcp_ctrl->init_data.qfprom_io,
		HDCP_KSV_LSB);
	qfprom_aksv_msb = DSS_REG_R(hdcp_ctrl->init_data.qfprom_io,
		HDCP_KSV_MSB);

	aksv[0] =  qfprom_aksv_lsb        & 0xFF;
	aksv[1] = (qfprom_aksv_lsb >> 8)  & 0xFF;
	aksv[2] = (qfprom_aksv_lsb >> 16) & 0xFF;
	aksv[3] = (qfprom_aksv_lsb >> 24) & 0xFF;
	aksv[4] =  qfprom_aksv_msb        & 0xFF;

	/* check there are 20 ones in AKSV */
	if (hdmi_hdcp_count_one(aksv, 5) != 20) {
		DEV_ERR("%s: %s: AKSV QFPROM doesn't have 20 1's, 20 0's\n",
			__func__, HDCP_STATE_NAME);
		DEV_ERR("%s: %s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
			__func__, HDCP_STATE_NAME, qfprom_aksv_msb,
			qfprom_aksv_lsb);
		rc = -EINVAL;
		goto error;
	}
	DEV_DBG("%s: %s: AKSV=%02x%08x\n", __func__, HDCP_STATE_NAME,
		qfprom_aksv_msb, qfprom_aksv_lsb);

	/*
	 * Write AKSV read from QFPROM to the HDCP registers.
	 * This step is needed for HDCP authentication and must be
	 * written before enabling HDCP.
	 */
	DSS_REG_W(io, HDMI_HDCP_SW_LOWER_AKSV, qfprom_aksv_lsb);
	DSS_REG_W(io, HDMI_HDCP_SW_UPPER_AKSV, qfprom_aksv_msb);

	/* Check to see if link0_Status has stale values for An ready bit */
	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
	DEV_DBG("%s: %s: Before enabling cipher Link0_status=0x%08x\n",
		__func__, HDCP_STATE_NAME, link0_status);
	if (link0_status & (BIT(8) | BIT(9))) {
		DEV_DBG("%s: %s: An ready even before enabling HDCP\n",
			__func__, HDCP_STATE_NAME);
		stale_an = true;
	}

	/*
	 * Read BCAPS
	 * We need to first try to read an HDCP register on the sink to see if
	 * the sink is ready for HDCP authentication
	 */
	memset(&ddc_data, 0, sizeof(ddc_data));
	ddc_data.dev_addr = 0x74;
	ddc_data.offset = 0x40;
	ddc_data.data_buf = &bcaps;
	ddc_data.data_len = 1;
	ddc_data.request_len = 1;
	ddc_data.retry = 5;
	ddc_data.what = "Bcaps";
	ddc_data.no_align = true;
	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
	if (rc) {
		DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
			HDCP_STATE_NAME);
		goto error;
	}
	DEV_DBG("%s: %s: BCAPS=%02x\n", __func__, HDCP_STATE_NAME, bcaps);

	/* receiver (0), repeater (1) */
	hdcp_ctrl->current_tp.ds_type =
		(bcaps & BIT(6)) >> 6 ? DS_REPEATER : DS_RECEIVER;

	/*
	 * HDCP setup prior to enabling HDCP_CTRL.
	 * Setup seed values for random number An.
	 */
	DSS_REG_W(io, HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
	DSS_REG_W(io, HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);

	/* Disable the RngCipher state */
	DSS_REG_W(io, HDMI_HDCP_DEBUG_CTRL,
		DSS_REG_R(io, HDMI_HDCP_DEBUG_CTRL) & ~(BIT(2)));
	DEV_DBG("%s: %s: HDCP_DEBUG_CTRL=0x%08x\n", __func__, HDCP_STATE_NAME,
		DSS_REG_R(io, HDMI_HDCP_DEBUG_CTRL));

	/* Ensure that all register writes are completed before
	 * enabling HDCP cipher
	 */
	wmb();

	/*
	 * Enable HDCP
	 * This needs to be done as early as possible in order for the
	 * hardware to make An available to read
	 */
	DSS_REG_W(io, HDMI_HDCP_CTRL, BIT(0));

	/* Clear any DDC failures from previous tries */
	reset_hdcp_ddc_failures(hdcp_ctrl);

	/* Write BCAPS to the hardware */
	if (hdcp_ctrl->tz_hdcp) {
		memset(scm_buf, 0x00, sizeof(scm_buf));

		/* Write BCAPS to hardware */
		scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA12;
		scm_buf[0].val = bcaps;

		ret = hdcp_scm_call(scm_buf, &resp);
		if (ret || resp) {
			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
				__func__, ret, resp);
			rc = -EINVAL;
			goto error;
		}
	} else {
		DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA12, bcaps);
	}

	/*
	 * If we had stale values for the An ready bit, it should most
	 * likely be cleared now after enabling HDCP cipher
	 */
	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
	DEV_DBG("%s: %s: After enabling HDCP Link0_Status=0x%08x\n",
		__func__, HDCP_STATE_NAME, link0_status);
	if (!(link0_status & (BIT(8) | BIT(9)))) {
		DEV_DBG("%s: %s: An not ready after enabling HDCP\n",
			__func__, HDCP_STATE_NAME);
		stale_an = false;
	}

	/* Wait for HDCP keys to be checked and validated */
	timeout_count = 100;
	keys_state = (link0_status >> 28) & 0x7;
	while ((keys_state != HDCP_KEYS_STATE_VALID) &&
		--timeout_count) {
		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
		keys_state = (link0_status >> 28) & 0x7;
		/* Fixed format string: was "s=%d\n, l0=%0x08x". */
		DEV_DBG("%s: %s: Keys not ready(%d). s=%d, l0=0x%08x\n",
			__func__, HDCP_STATE_NAME, timeout_count,
			keys_state, link0_status);
		msleep(20);
	}

	if (!timeout_count) {
		DEV_ERR("%s: %s: Invalid Keys State: %d\n", __func__,
			HDCP_STATE_NAME, keys_state);
		rc = -EINVAL;
		goto error;
	}

	/*
	 * 1.1_Features turned off by default.
	 * No need to write AInfo since 1.1_Features is disabled.
	 */
	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA4, 0);

	/* Wait for An0 and An1 bit to be ready */
	timeout_count = 100;
	do {
		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
		an_ready = (link0_status & BIT(8)) && (link0_status & BIT(9));
		if (!an_ready) {
			DEV_DBG("%s: %s: An not ready(%d). l0_status=0x%08x\n",
				__func__, HDCP_STATE_NAME, timeout_count,
				link0_status);
			msleep(20);
		}
	} while (!an_ready && --timeout_count);

	if (!timeout_count) {
		rc = -ETIMEDOUT;
		DEV_ERR("%s: %s: timedout, An0=%ld, An1=%ld\n", __func__,
			HDCP_STATE_NAME, (link0_status & BIT(8)) >> 8,
			(link0_status & BIT(9)) >> 9);
		goto error;
	}

	/*
	 * In cases where An_ready bits had stale values, it would be
	 * better to delay reading of An to avoid any potential of this
	 * read being blocked
	 */
	if (stale_an) {
		msleep(200);
		stale_an = false;
	}

	/* Read An0 and An1 */
	link0_an_0 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA5);
	link0_an_1 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA6);

	/* Read AKSV */
	link0_aksv_0 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA3);
	link0_aksv_1 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA4);

	/* Copy An and AKSV to byte arrays for transmission */
	aksv[0] =  link0_aksv_0        & 0xFF;
	aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
	aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
	aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
	aksv[4] =  link0_aksv_1        & 0xFF;

	an[0] =  link0_an_0        & 0xFF;
	an[1] = (link0_an_0 >> 8)  & 0xFF;
	an[2] = (link0_an_0 >> 16) & 0xFF;
	an[3] = (link0_an_0 >> 24) & 0xFF;
	an[4] =  link0_an_1        & 0xFF;
	an[5] = (link0_an_1 >> 8)  & 0xFF;
	an[6] = (link0_an_1 >> 16) & 0xFF;
	an[7] = (link0_an_1 >> 24) & 0xFF;

	/* Write An to offset 0x18 */
	memset(&ddc_data, 0, sizeof(ddc_data));
	ddc_data.dev_addr = 0x74;
	ddc_data.offset = 0x18;
	ddc_data.data_buf = an;
	ddc_data.data_len = 8;
	ddc_data.what = "An";
	rc = hdmi_ddc_write(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
	if (rc) {
		DEV_ERR("%s: %s: An write failed\n", __func__, HDCP_STATE_NAME);
		goto error;
	}

	/* Write AKSV to offset 0x10 */
	memset(&ddc_data, 0, sizeof(ddc_data));
	ddc_data.dev_addr = 0x74;
	ddc_data.offset = 0x10;
	ddc_data.data_buf = aksv;
	ddc_data.data_len = 5;
	ddc_data.what = "Aksv";
	rc = hdmi_ddc_write(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
	if (rc) {
		DEV_ERR("%s: %s: AKSV write failed\n", __func__,
			HDCP_STATE_NAME);
		goto error;
	}
	DEV_DBG("%s: %s: Link0-AKSV=%02x%08x\n", __func__,
		HDCP_STATE_NAME, link0_aksv_1 & 0xFF, link0_aksv_0);

	/* Read BKSV at offset 0x00 */
	memset(&ddc_data, 0, sizeof(ddc_data));
	ddc_data.dev_addr = 0x74;
	ddc_data.offset = 0x00;
	ddc_data.data_buf = bksv;
	ddc_data.data_len = 5;
	ddc_data.request_len = 5;
	ddc_data.retry = 5;
	ddc_data.what = "Bksv";
	ddc_data.no_align = true;
	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
	if (rc) {
		DEV_ERR("%s: %s: BKSV read failed\n", __func__,
			HDCP_STATE_NAME);
		goto error;
	}

	/* check there are 20 ones in BKSV */
	if (hdmi_hdcp_count_one(bksv, 5) != 20) {
		DEV_ERR("%s: %s: BKSV doesn't have 20 1's and 20 0's\n",
			__func__, HDCP_STATE_NAME);
		DEV_ERR("%s: %s: BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
			__func__, HDCP_STATE_NAME, bksv[4], bksv[3], bksv[2],
			bksv[1], bksv[0]);
		rc = -EINVAL;
		goto error;
	}

	link0_bksv_0 = bksv[3];
	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
	link0_bksv_1 = bksv[4];
	DEV_DBG("%s: %s: BKSV=%02x%08x\n", __func__, HDCP_STATE_NAME,
		link0_bksv_1, link0_bksv_0);

	/* Write BKSV read from sink to HDCP registers */
	if (hdcp_ctrl->tz_hdcp) {
		memset(scm_buf, 0x00, sizeof(scm_buf));

		scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA0;
		scm_buf[0].val = link0_bksv_0;
		scm_buf[1].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA1;
		scm_buf[1].val = link0_bksv_1;

		ret = hdcp_scm_call(scm_buf, &resp);
		if (ret || resp) {
			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
				__func__, ret, resp);
			rc = -EINVAL;
			goto error;
		}
	} else {
		DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA0, link0_bksv_0);
		DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA1, link0_bksv_1);
	}

	/* Enable HDCP interrupts and ack/clear any stale interrupts */
	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0xE6);

	/*
	 * HDCP Compliace Test case 1A-01:
	 * Wait here at least 100ms before reading R0'
	 */
	msleep(125);

	/* Read R0' at offset 0x08 */
	memset(buf, 0, sizeof(buf));
	memset(&ddc_data, 0, sizeof(ddc_data));
	ddc_data.dev_addr = 0x74;
	ddc_data.offset = 0x08;
	ddc_data.data_buf = buf;
	ddc_data.data_len = 2;
	ddc_data.request_len = 2;
	ddc_data.retry = 5;
	ddc_data.what = "R0'";
	ddc_data.no_align = true;
	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
	if (rc) {
		DEV_ERR("%s: %s: R0' read failed\n", __func__, HDCP_STATE_NAME);
		goto error;
	}
	DEV_DBG("%s: %s: R0'=%02x%02x\n", __func__, HDCP_STATE_NAME,
		buf[1], buf[0]);

	/* Write R0' to HDCP registers and check to see if it is a match */
	INIT_COMPLETION(hdcp_ctrl->r0_checked);
	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA2_0, (((u32)buf[1]) << 8) | buf[0]);
	timeout_count = wait_for_completion_timeout(
		&hdcp_ctrl->r0_checked, HZ*2);
	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
	is_match = link0_status & BIT(12);
	if (!is_match) {
		DEV_DBG("%s: %s: Link0_Status=0x%08x\n", __func__,
			HDCP_STATE_NAME, link0_status);
		if (!timeout_count) {
			DEV_ERR("%s: %s: Timeout. No R0 mtch. R0'=%02x%02x\n",
				__func__, HDCP_STATE_NAME, buf[1], buf[0]);
			rc = -ETIMEDOUT;
			goto error;
		} else {
			DEV_ERR("%s: %s: R0 mismatch. R0'=%02x%02x\n", __func__,
				HDCP_STATE_NAME, buf[1], buf[0]);
			rc = -EINVAL;
			goto error;
		}
	} else {
		DEV_DBG("%s: %s: R0 matches\n", __func__, HDCP_STATE_NAME);
	}

error:
	if (rc) {
		DEV_ERR("%s: %s: Authentication Part I failed\n", __func__,
			hdcp_ctrl ? HDCP_STATE_NAME : "???");
	} else {
		/* Enable HDCP Encryption */
		DSS_REG_W(io, HDMI_HDCP_CTRL, BIT(0) | BIT(8));
		DEV_INFO("%s: %s: Authentication Part I successful\n",
			__func__, HDCP_STATE_NAME);
	}
	return rc;
} /* hdmi_hdcp_authentication_part1 */
/*
 * Read one 4-byte V'.Hx value from the HDCP sink over DDC (DDC offset
 * @off, human-readable label @name for logging) and, when @wr is true,
 * write the little-endian assembled word to HDCP register @reg.
 *
 * This macro expands inside the caller and deliberately captures the
 * caller's locals: ddc_data, what, buf, rc, io, hdcp_ctrl. On a DDC
 * failure it jumps to the caller's "error" label.
 *
 * Fixes vs. previous version:
 *  - use an explicit "%s" format; passing (name) as the format string
 *    was a non-literal-format hazard (and broke if a name ever
 *    contained '%').
 *  - drop the trailing ';' after while (0) so the macro behaves like a
 *    single statement and is safe in unbraced if/else.
 */
#define READ_WRITE_V_H(off, name, reg, wr) \
do { \
	ddc_data.offset = (off); \
	memset(what, 0, sizeof(what)); \
	snprintf(what, sizeof(what), "%s", (name)); \
	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data); \
	if (rc) { \
		DEV_ERR("%s: %s: Read %s failed\n", __func__, HDCP_STATE_NAME, \
			what); \
		goto error; \
	} \
	DEV_DBG("%s: %s: %s: buf[0]=%x, buf[1]=%x, buf[2]=%x, buf[3]=%x\n", \
		__func__, HDCP_STATE_NAME, what, buf[0], buf[1], \
		buf[2], buf[3]); \
	if (wr) { \
		DSS_REG_W(io, (reg), \
			(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0])); \
	} \
} while (0)
/*
 * Read the five 32-bit V' hash words (V'.H0 .. V'.H4) from the sink
 * over DDC and hand them to the HDCP engine: either via the TrustZone
 * SCM service (when hdcp_ctrl->tz_hdcp is set) or by writing
 * HDMI_HDCP_RCVPORT_DATA7..11 directly.
 *
 * NOTE: the body relies on READ_WRITE_V_H(), which captures the local
 * variables ddc_data/what/buf/rc/io/hdcp_ctrl and jumps to the "error"
 * label below on DDC failure.
 *
 * Returns 0 on success or a negative error code (-EINVAL on bad input
 * or SCM failure, DDC error codes otherwise).
 */
static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	char what[20];
	int rc = 0;
	u8 buf[4];
	struct hdmi_tx_ddc_data ddc_data;
	struct dss_io_data *io;
	struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
	u32 phy_addr;
	/* HDCP register / DDC offset / debug-name table for the TZ path */
	struct hdmi_hdcp_reg_data reg_data[] = {
		{HDMI_HDCP_RCVPORT_DATA7, 0x20, "V' H0"},
		{HDMI_HDCP_RCVPORT_DATA8, 0x24, "V' H1"},
		{HDMI_HDCP_RCVPORT_DATA9, 0x28, "V' H2"},
		{HDMI_HDCP_RCVPORT_DATA10, 0x2C, "V' H3"},
		{HDMI_HDCP_RCVPORT_DATA11, 0x30, "V' H4"},
	};
	u32 size = sizeof(reg_data)/sizeof(reg_data[0]);
	u32 iter = 0;
	u32 ret = 0;
	u32 resp = 0;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		return -EINVAL;
	}

	phy_addr = hdcp_ctrl->init_data.phy_addr;
	io = hdcp_ctrl->init_data.core_io;

	/* Common DDC request template; offset/what set per read */
	memset(&ddc_data, 0, sizeof(ddc_data));
	ddc_data.dev_addr = 0x74;
	ddc_data.data_buf = buf;
	ddc_data.data_len = 4;
	ddc_data.request_len = 4;
	ddc_data.retry = 5;
	ddc_data.what = what;
	ddc_data.no_align = true;

	if (hdcp_ctrl->tz_hdcp) {
		memset(scm_buf, 0x00, sizeof(scm_buf));

		/* Collect all five words, then push them in one SCM call */
		for (iter = 0; iter < size && iter < SCM_HDCP_MAX_REG; iter++) {
			struct hdmi_hdcp_reg_data *rd = reg_data + iter;

			/* wr=false: read only, register write goes via SCM */
			READ_WRITE_V_H(rd->off, rd->name, 0, false);

			/* assemble little-endian 32-bit value */
			rd->reg_val = buf[3] << 24 | buf[2] << 16 |
				buf[1] << 8 | buf[0];

			scm_buf[iter].addr = phy_addr + reg_data[iter].reg_id;
			scm_buf[iter].val = reg_data[iter].reg_val;
		}
		ret = hdcp_scm_call(scm_buf, &resp);
		if (ret || resp) {
			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
				__func__, ret, resp);
			rc = -EINVAL;
			goto error;
		}
	} else {
		/* Direct path: read each word and write it immediately */
		/* Read V'.HO 4 Byte at offset 0x20 */
		READ_WRITE_V_H(0x20, "V' H0", HDMI_HDCP_RCVPORT_DATA7, true);
		/* Read V'.H1 4 Byte at offset 0x24 */
		READ_WRITE_V_H(0x24, "V' H1", HDMI_HDCP_RCVPORT_DATA8, true);
		/* Read V'.H2 4 Byte at offset 0x28 */
		READ_WRITE_V_H(0x28, "V' H2", HDMI_HDCP_RCVPORT_DATA9, true);
		/* Read V'.H3 4 Byte at offset 0x2C */
		READ_WRITE_V_H(0x2C, "V' H3", HDMI_HDCP_RCVPORT_DATA10, true);
		/* Read V'.H4 4 Byte at offset 0x30 */
		READ_WRITE_V_H(0x30, "V' H4", HDMI_HDCP_RCVPORT_DATA11, true);
	}

error:
	return rc;
}
/*
 * HDCP 1.x authentication Part II (repeater flow).
 *
 * Called only when the downstream device is an HDCP repeater. Steps:
 *  1. poll BCAPS until the KSV-FIFO READY bit (bit 5) is set
 *     (<= 5 s per the HDCP spec);
 *  2. read BSTATUS, program it together with BCAPS into
 *     HDMI_HDCP_RCVPORT_DATA12 (via SCM when tz_hdcp is set) and
 *     enforce the device-count / cascade-depth limits;
 *  3. pull the downstream KSV list (5 bytes per device) over DDC;
 *  4. read V' via hdmi_hdcp_transfer_v_h();
 *  5. stream the KSV bytes into the SHA engine (DONE bit on the last
 *     byte) and wait for SHA_COMP_DONE, then for the V-match bit in
 *     LINK0_STATUS.
 * On exit the observed topology is stored in hdcp_ctrl->current_tp.
 *
 * Returns 0 on success or a negative error code.
 */
static int hdmi_hdcp_authentication_part2(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	int rc, cnt, i;
	struct hdmi_tx_ddc_data ddc_data;
	u32 timeout_count, down_stream_devices = 0;
	u32 repeater_cascade_depth = 0;
	u8 buf[0xFF];
	u8 *ksv_fifo = NULL;
	u8 bcaps;
	u16 bstatus, max_devs_exceeded = 0, max_cascade_exceeded = 0;
	u32 link0_status;
	u32 ksv_bytes;
	struct dss_io_data *io;
	struct scm_hdcp_req scm_buf[SCM_HDCP_MAX_REG];
	u32 phy_addr;
	u32 ret = 0;
	u32 resp = 0;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		rc = -EINVAL;
		goto error;
	}

	phy_addr = hdcp_ctrl->init_data.phy_addr;

	if (HDCP_STATE_AUTHENTICATING != hdcp_ctrl->hdcp_state) {
		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
			HDCP_STATE_NAME);
		rc = -EINVAL;
		goto error;
	}

	/* KSV list is accumulated directly into the topology record */
	ksv_fifo = hdcp_ctrl->current_tp.ksv_list;
	io = hdcp_ctrl->init_data.core_io;

	memset(buf, 0, sizeof(buf));
	memset(ksv_fifo, 0,
		sizeof(hdcp_ctrl->current_tp.ksv_list));

	/*
	 * Wait until READY bit is set in BCAPS, as per HDCP specifications
	 * maximum permitted time to check for READY bit is five seconds.
	 */
	timeout_count = 50;
	do {
		timeout_count--;
		/* Read BCAPS at offset 0x40 */
		memset(&ddc_data, 0, sizeof(ddc_data));
		ddc_data.dev_addr = 0x74;
		ddc_data.offset = 0x40;
		ddc_data.data_buf = &bcaps;
		ddc_data.data_len = 1;
		ddc_data.request_len = 1;
		ddc_data.retry = 5;
		ddc_data.what = "Bcaps";
		ddc_data.no_align = false;
		rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
		if (rc) {
			DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
				HDCP_STATE_NAME);
			goto error;
		}
		msleep(100);
	} while (!(bcaps & BIT(5)) && timeout_count);

	/* Read BSTATUS at offset 0x41 */
	memset(&ddc_data, 0, sizeof(ddc_data));
	ddc_data.dev_addr = 0x74;
	ddc_data.offset = 0x41;
	ddc_data.data_buf = buf;
	ddc_data.data_len = 2;
	ddc_data.request_len = 2;
	ddc_data.retry = 5;
	ddc_data.what = "Bstatuss";
	ddc_data.no_align = false;
	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
	if (rc) {
		DEV_ERR("%s: %s: BSTATUS read failed\n", __func__,
			HDCP_STATE_NAME);
		goto error;
	}
	/* BSTATUS is little-endian on the wire */
	bstatus = buf[1];
	bstatus = (bstatus << 8) | buf[0];

	if (hdcp_ctrl->tz_hdcp) {
		memset(scm_buf, 0x00, sizeof(scm_buf));

		/* Write BSTATUS and BCAPS to HDCP registers */
		scm_buf[0].addr = phy_addr + HDMI_HDCP_RCVPORT_DATA12;
		scm_buf[0].val = bcaps | (bstatus << 8);

		ret = hdcp_scm_call(scm_buf, &resp);
		if (ret || resp) {
			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
				__func__, ret, resp);
			rc = -EINVAL;
			goto error;
		}
	} else {
		DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA12, bcaps | (bstatus << 8));
	}

	/* BSTATUS[6:0] = number of attached downstream devices */
	down_stream_devices = bstatus & 0x7F;

	if (down_stream_devices == 0) {
		/*
		 * If no downstream devices are attached to the repeater
		 * then part II fails.
		 * todo: The other approach would be to continue PART II.
		 */
		DEV_ERR("%s: %s: No downstream devices\n", __func__,
			HDCP_STATE_NAME);
		rc = -EINVAL;
		goto error;
	}

	/* Cascaded repeater depth */
	repeater_cascade_depth = (bstatus >> 8) & 0x7;

	/*
	 * HDCP Compliance 1B-05:
	 * Check if no. of devices connected to repeater
	 * exceed max_devices_connected from bit 7 of Bstatus.
	 */
	max_devs_exceeded = (bstatus & BIT(7)) >> 7;
	if (max_devs_exceeded == 0x01) {
		DEV_ERR("%s: %s: no. of devs connected exceeds max allowed",
			__func__, HDCP_STATE_NAME);
		rc = -EINVAL;
		goto error;
	}

	/*
	 * HDCP Compliance 1B-06:
	 * Check if no. of cascade connected to repeater
	 * exceed max_cascade_connected from bit 11 of Bstatus.
	 */
	max_cascade_exceeded = (bstatus & BIT(11)) >> 11;
	if (max_cascade_exceeded == 0x01) {
		DEV_ERR("%s: %s: no. of cascade conn exceeds max allowed",
			__func__, HDCP_STATE_NAME);
		rc = -EINVAL;
		goto error;
	}

	/*
	 * Read KSV FIFO over DDC
	 * Key Slection vector FIFO Used to pull downstream KSVs
	 * from HDCP Repeaters.
	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
	 * auto incrementing access.
	 * All bytes read as 0x00 for HDCP Receivers that are not
	 * HDCP Repeaters (REPEATER == 0).
	 */
	ksv_bytes = 5 * down_stream_devices;
	memset(&ddc_data, 0, sizeof(ddc_data));
	ddc_data.dev_addr = 0x74;
	ddc_data.offset = 0x43;
	ddc_data.data_buf = ksv_fifo;
	ddc_data.data_len = ksv_bytes;
	ddc_data.request_len = ksv_bytes;
	ddc_data.retry = 5;
	ddc_data.what = "KSV FIFO";
	ddc_data.no_align = true;

	/* retry up to 20 times, 25 ms apart, until the FIFO read works */
	cnt = 0;
	do {
		rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
		if (rc) {
			DEV_ERR("%s: %s: KSV FIFO read failed\n", __func__,
				HDCP_STATE_NAME);
			/*
			 * HDCP Compliace Test case 1B-01:
			 * Wait here until all the ksv bytes have been
			 * read from the KSV FIFO register.
			 */
			msleep(25);
		} else {
			break;
		}
		cnt++;
	} while (cnt != 20);

	/* rc still holds the last read error when all retries failed */
	if (cnt == 20)
		goto error;

	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
	if (rc)
		goto error;

	/*
	 * Write KSV FIFO to HDCP_SHA_DATA.
	 * This is done 1 byte at time starting with the LSB.
	 * On the very last byte write, the HDCP_SHA_DATA_DONE bit[0]
	 */

	/* First, reset SHA engine */
	/* Next, enable SHA engine, SEL=DIGA_HDCP */
	if (hdcp_ctrl->tz_hdcp) {
		memset(scm_buf, 0x00, sizeof(scm_buf));

		scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_CTRL;
		scm_buf[0].val = HDCP_REG_ENABLE;
		scm_buf[1].addr = phy_addr + HDMI_HDCP_SHA_CTRL;
		scm_buf[1].val = HDCP_REG_DISABLE;

		ret = hdcp_scm_call(scm_buf, &resp);
		if (ret || resp) {
			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
				__func__, ret, resp);
			rc = -EINVAL;
			goto error;
		}
	} else {
		DSS_REG_W(io, HDMI_HDCP_SHA_CTRL, HDCP_REG_ENABLE);
		DSS_REG_W(io, HDMI_HDCP_SHA_CTRL, HDCP_REG_DISABLE);
	}

	/* All but the last KSV byte: write without the DONE bit */
	for (i = 0; i < ksv_bytes - 1; i++) {
		/* Write KSV byte and do not set DONE bit[0] */
		if (hdcp_ctrl->tz_hdcp) {
			memset(scm_buf, 0x00, sizeof(scm_buf));

			scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_DATA;
			scm_buf[0].val = ksv_fifo[i] << 16;

			ret = hdcp_scm_call(scm_buf, &resp);
			if (ret || resp) {
				DEV_ERR("%s: scm_call ret = %d, resp = %d\n",
					__func__, ret, resp);
				rc = -EINVAL;
				goto error;
			}
		} else {
			DSS_REG_W_ND(io, HDMI_HDCP_SHA_DATA, ksv_fifo[i] << 16);
		}

		/*
		 * Once 64 bytes have been written, we need to poll for
		 * HDCP_SHA_BLOCK_DONE before writing any further
		 */
		if (i && !((i + 1) % 64)) {
			timeout_count = 100;
			while (!(DSS_REG_R(io, HDMI_HDCP_SHA_STATUS) & BIT(0))
				&& (--timeout_count)) {
				DEV_DBG("%s: %s: Wrote 64 bytes KVS FIFO\n",
					__func__, HDCP_STATE_NAME);
				DEV_DBG("%s: %s: HDCP_SHA_STATUS=%08x\n",
					__func__, HDCP_STATE_NAME,
					DSS_REG_R(io, HDMI_HDCP_SHA_STATUS));
				msleep(20);
			}
			if (!timeout_count) {
				rc = -ETIMEDOUT;
				DEV_ERR("%s: %s: Write KSV FIFO timedout",
					__func__, HDCP_STATE_NAME);
				goto error;
			}
		}
	}

	/* Write l to DONE bit[0] */
	if (hdcp_ctrl->tz_hdcp) {
		memset(scm_buf, 0x00, sizeof(scm_buf));

		scm_buf[0].addr = phy_addr + HDMI_HDCP_SHA_DATA;
		scm_buf[0].val = (ksv_fifo[ksv_bytes - 1] << 16) | 0x1;

		ret = hdcp_scm_call(scm_buf, &resp);
		if (ret || resp) {
			DEV_ERR("%s: error: scm_call ret = %d, resp = %d\n",
				__func__, ret, resp);
			rc = -EINVAL;
			goto error;
		}
	} else {
		DSS_REG_W_ND(io, HDMI_HDCP_SHA_DATA,
			(ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
	}

	/* Now wait for HDCP_SHA_COMP_DONE */
	timeout_count = 100;
	while ((0x10 != (DSS_REG_R(io, HDMI_HDCP_SHA_STATUS)
		& 0xFFFFFF10)) && --timeout_count)
		msleep(20);

	if (!timeout_count) {
		rc = -ETIMEDOUT;
		DEV_ERR("%s: %s: SHA computation timedout", __func__,
			HDCP_STATE_NAME);
		goto error;
	}

	/* Wait for V_MATCHES */
	timeout_count = 100;
	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
	while (((link0_status & BIT(20)) != BIT(20)) && --timeout_count) {
		DEV_DBG("%s: %s: Waiting for V_MATCHES(%d). l0_status=0x%08x\n",
			__func__, HDCP_STATE_NAME, timeout_count, link0_status);
		msleep(20);
		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
	}

	if (!timeout_count) {
		rc = -ETIMEDOUT;
		DEV_ERR("%s: %s: HDCP V Match timedout", __func__,
			HDCP_STATE_NAME);
		goto error;
	}

error:
	if (rc)
		DEV_ERR("%s: %s: Authentication Part II failed\n", __func__,
			hdcp_ctrl ? HDCP_STATE_NAME : "???");
	else
		DEV_INFO("%s: %s: Authentication Part II successful\n",
			__func__, HDCP_STATE_NAME);

	if (!hdcp_ctrl) {
		DEV_ERR("%s: hdcp_ctrl null. Topology not updated\n",
			__func__);
		return rc;
	}

	/* Update topology information */
	hdcp_ctrl->current_tp.dev_count = down_stream_devices;
	hdcp_ctrl->current_tp.max_cascade_exceeded = max_cascade_exceeded;
	hdcp_ctrl->current_tp.max_dev_exceeded = max_devs_exceeded;
	hdcp_ctrl->current_tp.depth = repeater_cascade_depth;

	return rc;
} /* hdmi_hdcp_authentication_part2 */
static void hdmi_hdcp_cache_topology(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
DEV_ERR("%s: invalid input\n", __func__);
return;
}
memcpy((void *)&hdcp_ctrl->cached_tp,
(void *) &hdcp_ctrl->current_tp,
sizeof(hdcp_ctrl->cached_tp));
}
/*
 * Send a HDCP_MGR_EVENT=MSG_READY uevent announcing that topology data
 * is available, carrying the message id (DOWN_CHECK_TOPOLOGY) and the
 * source id (HDCP_V1_TX) as extra environment strings.
 *
 * Fixed: validate hdcp_ctrl before dereferencing it (every sibling in
 * this file does), and size the snprintf by the buffers instead of a
 * repeated magic 16.
 */
static void hdmi_hdcp_notify_topology(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
	char a[16], b[16];
	char *envp[] = {
		[0] = "HDCP_MGR_EVENT=MSG_READY",
		[1] = a,
		[2] = b,
		NULL,
	};

	if (!hdcp_ctrl) {
		DEV_ERR("%s: invalid input\n", __func__);
		return;
	}

	snprintf(envp[1], sizeof(a), "%d", (int)DOWN_CHECK_TOPOLOGY);
	snprintf(envp[2], sizeof(b), "%d", (int)HDCP_V1_TX);

	kobject_uevent_env(hdcp_ctrl->init_data.sysfs_kobj, KOBJ_CHANGE, envp);
	DEV_DBG("%s Event Sent: %s msgID = %s srcID = %s\n", __func__,
		envp[0], envp[1], envp[2]);
}
static void hdmi_hdcp_int_work(struct work_struct *work)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
struct hdmi_hdcp_ctrl, hdcp_int_work);
if (!hdcp_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return;
}
mutex_lock(hdcp_ctrl->init_data.mutex);
hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL;
mutex_unlock(hdcp_ctrl->init_data.mutex);
if (hdcp_ctrl->init_data.notify_status) {
hdcp_ctrl->init_data.notify_status(
hdcp_ctrl->init_data.cb_data,
hdcp_ctrl->hdcp_state);
}
} /* hdmi_hdcp_int_work */
/*
 * Delayed work that drives the full HDCP authentication sequence:
 * enables software DDC arbitration, runs Part I, runs Part II when the
 * sink is a repeater, then re-enables hardware DDC arbitration.
 * Finally — only if the state is still AUTHENTICATING, i.e. nobody
 * called off/reauthenticate meanwhile — it publishes the result
 * (AUTHENTICATED or AUTH_FAIL) and notifies the HDMI Tx controller.
 */
static void hdmi_hdcp_auth_work(struct work_struct *work)
{
	int rc;
	struct delayed_work *dw = to_delayed_work(work);
	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(dw,
		struct hdmi_hdcp_ctrl, hdcp_auth_work);
	struct dss_io_data *io;

	if (!hdcp_ctrl) {
		DEV_ERR("%s: invalid input\n", __func__);
		return;
	}

	if (HDCP_STATE_AUTHENTICATING != hdcp_ctrl->hdcp_state) {
		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
			HDCP_STATE_NAME);
		return;
	}

	io = hdcp_ctrl->init_data.core_io;
	/* Enabling Software DDC */
	DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION , DSS_REG_R(io,
		HDMI_DDC_ARBITRATION) & ~(BIT(4)));

	rc = hdmi_hdcp_authentication_part1(hdcp_ctrl);
	if (rc) {
		DEV_DBG("%s: %s: HDCP Auth Part I failed\n", __func__,
			HDCP_STATE_NAME);
		goto error;
	}

	/* Part II is only defined for repeaters */
	if (hdcp_ctrl->current_tp.ds_type == DS_REPEATER) {
		rc = hdmi_hdcp_authentication_part2(hdcp_ctrl);
		if (rc) {
			DEV_DBG("%s: %s: HDCP Auth Part II failed\n", __func__,
				HDCP_STATE_NAME);
			goto error;
		}
	} else {
		DEV_INFO("%s: Downstream device is not a repeater\n", __func__);
	}
	/* Disabling software DDC before going into part3 to make sure
	 * there is no Arbitration between software and hardware for DDC */
	DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION , DSS_REG_R(io,
		HDMI_DDC_ARBITRATION) | (BIT(4)));

error:
	/*
	 * Ensure that the state did not change during authentication.
	 * If it did, it means that deauthenticate/reauthenticate was
	 * called. In that case, this function need not notify HDMI Tx
	 * of the result
	 */
	mutex_lock(hdcp_ctrl->init_data.mutex);
	if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
		if (rc) {
			hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL;
		} else {
			hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
			hdcp_ctrl->auth_retries = 0;
			hdmi_hdcp_cache_topology(hdcp_ctrl);
			hdmi_hdcp_notify_topology(hdcp_ctrl);
		}
		mutex_unlock(hdcp_ctrl->init_data.mutex);

		/* Notify HDMI Tx controller of the result */
		DEV_DBG("%s: %s: Notifying HDMI Tx of auth result\n",
			__func__, HDCP_STATE_NAME);
		if (hdcp_ctrl->init_data.notify_status) {
			hdcp_ctrl->init_data.notify_status(
				hdcp_ctrl->init_data.cb_data,
				hdcp_ctrl->hdcp_state);
		}
	} else {
		DEV_DBG("%s: %s: HDCP state changed during authentication\n",
			__func__, HDCP_STATE_NAME);
		mutex_unlock(hdcp_ctrl->init_data.mutex);
	}
	return;
} /* hdmi_hdcp_auth_work */
int hdmi_hdcp_authenticate(void *input)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
if (!hdcp_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return -EINVAL;
}
if (HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) {
DEV_DBG("%s: %s: already active or activating. returning\n",
__func__, HDCP_STATE_NAME);
return 0;
}
DEV_DBG("%s: %s: Queuing work to start HDCP authentication", __func__,
HDCP_STATE_NAME);
mutex_lock(hdcp_ctrl->init_data.mutex);
hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
mutex_unlock(hdcp_ctrl->init_data.mutex);
queue_delayed_work(hdcp_ctrl->init_data.workq,
&hdcp_ctrl->hdcp_auth_work, 0);
return 0;
} /* hdmi_hdcp_authenticate */
/*
 * Restart HDCP authentication after a failure (state AUTH_FAIL only).
 * Resets the cipher engine by toggling HPD circuitry, quiesces the
 * DDC hardware, disables encryption, then re-queues the auth work
 * with a 500 ms delay.
 *
 * Consistency fix: the function already caches core_io in `io` but the
 * HPD register accesses re-dereferenced hdcp_ctrl->init_data.core_io;
 * they now use the same alias (same register block, identical
 * behavior).
 *
 * Returns 0, or -EINVAL on bad input.
 */
int hdmi_hdcp_reauthenticate(void *input)
{
	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
	struct dss_io_data *io;
	u32 ret = 0;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		return -EINVAL;
	}

	io = hdcp_ctrl->init_data.core_io;

	if (HDCP_STATE_AUTH_FAIL != hdcp_ctrl->hdcp_state) {
		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
			HDCP_STATE_NAME);
		return 0;
	}

	/*
	 * Disable HPD circuitry.
	 * This is needed to reset the HDCP cipher engine so that when we
	 * attempt a re-authentication, HW would clear the AN0_READY and
	 * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
	 */
	DSS_REG_W(io, HDMI_HPD_CTRL,
		DSS_REG_R(io, HDMI_HPD_CTRL) & ~BIT(28));

	/* Disable HDCP interrupts */
	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0);

	DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));

	/* Wait to be clean on DDC HW engine */
	hdmi_hdcp_hw_ddc_clean(hdcp_ctrl);

	/* Disable encryption and disable the HDCP block */
	DSS_REG_W(io, HDMI_HDCP_CTRL, 0);

	/* Enable HPD circuitry */
	DSS_REG_W(io, HDMI_HPD_CTRL,
		DSS_REG_R(io, HDMI_HPD_CTRL) | BIT(28));

	/* Restart authentication attempt */
	DEV_DBG("%s: %s: Scheduling work to start HDCP authentication",
		__func__, HDCP_STATE_NAME);
	mutex_lock(hdcp_ctrl->init_data.mutex);
	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
	mutex_unlock(hdcp_ctrl->init_data.mutex);
	queue_delayed_work(hdcp_ctrl->init_data.workq,
		&hdcp_ctrl->hdcp_auth_work, HZ/2);

	return ret;
} /* hdmi_hdcp_reauthenticate */
/*
 * Turn the HDCP session off. Order matters here:
 *  1. disable HDCP interrupts and flip the state to INACTIVE under the
 *     mutex, so in-flight auth/reauth work sees the shutdown;
 *  2. synchronously cancel the auth and interrupt work items (waits if
 *     one is running — no new ones can be scheduled after step 1);
 *  3. reset the HDCP block and disable encryption.
 */
void hdmi_hdcp_off(void *input)
{
	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
	struct dss_io_data *io;
	int rc = 0;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		return;
	}

	io = hdcp_ctrl->init_data.core_io;

	if (HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) {
		DEV_DBG("%s: %s: inactive. returning\n", __func__,
			HDCP_STATE_NAME);
		return;
	}

	/*
	 * Disable HDCP interrupts.
	 * Also, need to set the state to inactive here so that any ongoing
	 * reauth works will know that the HDCP session has been turned off.
	 */
	mutex_lock(hdcp_ctrl->init_data.mutex);
	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0);
	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
	mutex_unlock(hdcp_ctrl->init_data.mutex);

	/*
	 * Cancel any pending auth/reauth attempts.
	 * If one is ongoing, this will wait for it to finish.
	 * No more reauthentiaction attempts will be scheduled since we
	 * set the currect state to inactive.
	 */
	rc = cancel_delayed_work_sync(&hdcp_ctrl->hdcp_auth_work);
	if (rc)
		DEV_DBG("%s: %s: Deleted hdcp auth work\n", __func__,
			HDCP_STATE_NAME);
	rc = cancel_work_sync(&hdcp_ctrl->hdcp_int_work);
	if (rc)
		DEV_DBG("%s: %s: Deleted hdcp int work\n", __func__,
			HDCP_STATE_NAME);

	DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));

	/* Disable encryption and disable the HDCP block */
	DSS_REG_W(io, HDMI_HDCP_CTRL, 0);

	DEV_DBG("%s: %s: HDCP: Off\n", __func__, HDCP_STATE_NAME);
} /* hdmi_hdcp_off */
/*
 * HDCP interrupt service routine, called from the HDMI Tx ISR.
 * Reads HDMI_HDCP_INT_CTRL and, for each asserted source, writes the
 * value back with the corresponding ACK bit set (status bit n is
 * acknowledged by bit n+1). Completes r0_checked for success/failure
 * interrupts during authentication, and defers AUTH_FAIL notification
 * to hdcp_int_work when already authenticated.
 * Returns 0, or -EINVAL on bad input.
 */
int hdmi_hdcp_isr(void *input)
{
	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
	int rc = 0;
	struct dss_io_data *io;
	u32 hdcp_int_val;

	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
		DEV_ERR("%s: invalid input\n", __func__);
		rc = -EINVAL;
		goto error;
	}

	io = hdcp_ctrl->init_data.core_io;

	hdcp_int_val = DSS_REG_R(io, HDMI_HDCP_INT_CTRL);

	/* Ignore HDCP interrupts if HDCP is disabled */
	if (HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) {
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, HDCP_INT_CLR);
		return 0;
	}

	if (hdcp_int_val & BIT(0)) {
		/* AUTH_SUCCESS_INT */
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(1)));
		DEV_INFO("%s: %s: AUTH_SUCCESS_INT received\n", __func__,
			HDCP_STATE_NAME);
		/* unblock the R0' wait in authentication part I */
		if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state)
			complete_all(&hdcp_ctrl->r0_checked);
	}

	if (hdcp_int_val & BIT(4)) {
		/* AUTH_FAIL_INT */
		u32 link_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(5)));
		DEV_INFO("%s: %s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
			__func__, HDCP_STATE_NAME, link_status);
		if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state) {
			/* Inform HDMI Tx of the failure */
			queue_work(hdcp_ctrl->init_data.workq,
				&hdcp_ctrl->hdcp_int_work);
			/* todo: print debug log with auth fail reason */
		} else if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
			complete_all(&hdcp_ctrl->r0_checked);
		}

		/* Clear AUTH_FAIL_INFO as well */
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(7)));
	}

	if (hdcp_int_val & BIT(8)) {
		/* DDC_XFER_REQ_INT */
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(9)));
		DEV_INFO("%s: %s: DDC_XFER_REQ_INT received\n", __func__,
			HDCP_STATE_NAME);
	}

	if (hdcp_int_val & BIT(12)) {
		/* DDC_XFER_DONE_INT */
		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(13)));
		DEV_INFO("%s: %s: DDC_XFER_DONE received\n", __func__,
			HDCP_STATE_NAME);
	}

error:
	return rc;
} /* hdmi_hdcp_isr */
static ssize_t hdmi_hdcp_sysfs_rda_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret;
struct hdmi_hdcp_ctrl *hdcp_ctrl =
hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
if (!hdcp_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return -EINVAL;
}
mutex_lock(hdcp_ctrl->init_data.mutex);
ret = snprintf(buf, PAGE_SIZE, "%d\n", hdcp_ctrl->hdcp_state);
DEV_DBG("%s: '%d'\n", __func__, hdcp_ctrl->hdcp_state);
mutex_unlock(hdcp_ctrl->init_data.mutex);
return ret;
} /* hdmi_hdcp_sysfs_rda_hdcp*/
static ssize_t hdmi_hdcp_sysfs_rda_tp(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret = 0;
struct hdmi_hdcp_ctrl *hdcp_ctrl =
hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);
if (!hdcp_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return -EINVAL;
}
switch (hdcp_ctrl->tp_msgid) {
case DOWN_CHECK_TOPOLOGY:
case DOWN_REQUEST_TOPOLOGY:
buf[MSG_ID_IDX] = hdcp_ctrl->tp_msgid;
buf[RET_CODE_IDX] = HDCP_AUTHED;
ret = HEADER_LEN;
memcpy(buf + HEADER_LEN, &hdcp_ctrl->cached_tp,
sizeof(struct HDCP_V2V1_MSG_TOPOLOGY));
ret += sizeof(struct HDCP_V2V1_MSG_TOPOLOGY);
/* clear the flag once data is read back to user space*/
hdcp_ctrl->tp_msgid = -1;
break;
default:
ret = -EINVAL;
}
return ret;
} /* hdmi_hdcp_sysfs_rda_tp*/
/*
 * sysfs "tp" store handler: accept a topology request message id
 * (first byte of the write) and remember it for the next read.
 * Unknown ids yield -EINVAL.
 */
static ssize_t hdmi_hdcp_sysfs_wta_tp(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int msgid;
	struct hdmi_hdcp_ctrl *hdcp_ctrl =
		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_HDCP);

	if (hdcp_ctrl == NULL || buf == NULL) {
		DEV_ERR("%s: invalid input\n", __func__);
		return -EINVAL;
	}

	msgid = buf[0];
	if (msgid == DOWN_CHECK_TOPOLOGY || msgid == DOWN_REQUEST_TOPOLOGY) {
		hdcp_ctrl->tp_msgid = msgid;
		return count;
	}

	/* more cases added here */
	return -EINVAL;
} /* hdmi_tx_sysfs_wta_hpd */
/* /sys/.../hdcp/status: read-only numeric HDCP state */
static DEVICE_ATTR(status, S_IRUGO, hdmi_hdcp_sysfs_rda_status, NULL);
/* /sys/.../hdcp/tp: read cached topology / write a topology request id */
static DEVICE_ATTR(tp, S_IRUGO | S_IWUSR, hdmi_hdcp_sysfs_rda_tp,
	hdmi_hdcp_sysfs_wta_tp);

static struct attribute *hdmi_hdcp_fs_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_tp.attr,
	NULL,
};

/* Grouped under an "hdcp" subdirectory of the HDMI Tx sysfs kobject */
static struct attribute_group hdmi_hdcp_fs_attr_group = {
	.name = "hdcp",
	.attrs = hdmi_hdcp_fs_attrs,
};
void hdmi_hdcp_deinit(void *input)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
if (!hdcp_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return;
}
sysfs_remove_group(hdcp_ctrl->init_data.sysfs_kobj,
&hdmi_hdcp_fs_attr_group);
kfree(hdcp_ctrl);
} /* hdmi_hdcp_deinit */
/*
 * Allocate and initialise the HDCP module: validate init_data, create
 * the sysfs group, set up the work items and completion, and probe for
 * the TrustZone HDCP SCM service (tz_hdcp).
 *
 * Fixed: on sysfs_create_group() failure the old code still returned
 * the allocated control block, so the caller saw a "successful" init
 * with no sysfs group (and a later hdmi_hdcp_deinit() would try to
 * remove a group that was never created). Now the allocation is freed
 * and NULL is returned, matching the existing failure contract.
 *
 * Returns an opaque handle for the other hdmi_hdcp_* entry points, or
 * NULL on failure.
 */
void *hdmi_hdcp_init(struct hdmi_hdcp_init_data *init_data)
{
	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
	int ret;

	if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
		!init_data->mutex || !init_data->ddc_ctrl ||
		!init_data->notify_status || !init_data->workq ||
		!init_data->cb_data) {
		DEV_ERR("%s: invalid input\n", __func__);
		goto error;
	}

	hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
	if (!hdcp_ctrl) {
		DEV_ERR("%s: Out of memory\n", __func__);
		goto error;
	}

	hdcp_ctrl->init_data = *init_data;

	if (sysfs_create_group(init_data->sysfs_kobj,
				&hdmi_hdcp_fs_attr_group)) {
		DEV_ERR("%s: hdcp sysfs group creation failed\n", __func__);
		kfree(hdcp_ctrl);
		hdcp_ctrl = NULL;
		goto error;
	}

	INIT_DELAYED_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
	INIT_WORK(&hdcp_ctrl->hdcp_int_work, hdmi_hdcp_int_work);

	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
	init_completion(&hdcp_ctrl->r0_checked);

	/* Prefer the secure (TrustZone) register path when available */
	ret = scm_is_call_available(SCM_SVC_HDCP, SCM_CMD_HDCP);
	if (ret <= 0) {
		DEV_ERR("%s: error: secure hdcp service unavailable, ret = %d",
			__func__, ret);
	} else {
		DEV_DBG("%s: tz_hdcp = 1\n", __func__);
		hdcp_ctrl->tz_hdcp = 1;
	}

	DEV_DBG("%s: HDCP module initialized. HDCP_STATE=%s", __func__,
		HDCP_STATE_NAME);
error:
	return (void *)hdcp_ctrl;
} /* hdmi_hdcp_init */
| gpl-2.0 |
Fuzion24/SM-G900V_NA_KK_Opensource-S5-Kernel- | drivers/mfd/pm8xxx-spk.c | 2204 | 7461 | /* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/debugfs.h>
#include <linux/mfd/pm8xxx/core.h>
#include <linux/mfd/pm8xxx/spk.h>
#define PM8XXX_SPK_CTL1_REG_OFF 0
#define PM8XXX_SPK_CTL2_REG_OFF 1
#define PM8XXX_SPK_CTL3_REG_OFF 2
#define PM8XXX_SPK_CTL4_REG_OFF 3
#define PM8XXX_SPK_TEST_REG_1_OFF 4
#define PM8XXX_SPK_TEST_REG_2_OFF 5
#define PM8XXX_SPK_BANK_SEL 4
#define PM8XXX_SPK_BANK_WRITE 0x80
#define PM8XXX_SPK_BANK_VAL_MASK 0xF
#define BOOST_6DB_GAIN_EN_MASK 0x8
#define VSEL_LD0_1P1 0x0
#define VSEL_LD0_1P2 0x2
#define VSEL_LD0_1P0 0x4
#define PWM_EN_MASK 0xF
#define PM8XXX_SPK_TEST_REG_1_BANKS 8
#define PM8XXX_SPK_TEST_REG_2_BANKS 2
#define PM8XXX_SPK_GAIN 0x5
#define PM8XXX_ADD_EN 0x1
/* Driver-private state for the PM8xxx speaker amplifier block. */
struct pm8xxx_spk_chip {
	struct list_head link;			/* list linkage (unused in this file) */
	struct pm8xxx_spk_platform_data pdata;	/* copy of board config */
	struct device *dev;			/* platform device; parent is the PMIC */
	enum pm8xxx_version version;		/* detected PMIC version */
	struct mutex spk_mutex;			/* serializes register access */
	u16 base;				/* first SPK register address */
	u16 end;				/* last SPK register address */
};

/* Singleton: set by probe, freed by remove (or probe error path). */
static struct pm8xxx_spk_chip *the_spk_chip;
/*
 * True when the singleton chip pointer is a usable (non-NULL,
 * non-ERR_PTR) handle. Idiom: IS_ERR_OR_NULL() replaces the manual
 * NULL/IS_ERR pair.
 */
static inline bool spk_defined(void)
{
	return !IS_ERR_OR_NULL(the_spk_chip);
}
/*
 * Write @val (low nibble) into bank @bank of the banked register @reg,
 * serialized by the chip mutex. Returns 0 or a negative errno from
 * pm8xxx_writeb().
 */
static int pm8xxx_spk_bank_write(u16 reg, u16 bank, u8 val)
{
	int rc;
	u8 bank_val = PM8XXX_SPK_BANK_WRITE;

	bank_val |= bank << PM8XXX_SPK_BANK_SEL;
	bank_val |= val & PM8XXX_SPK_BANK_VAL_MASK;

	mutex_lock(&the_spk_chip->spk_mutex);
	rc = pm8xxx_writeb(the_spk_chip->dev->parent, reg, bank_val);
	if (rc != 0)
		pr_err("pm8xxx_writeb(): rc=%d\n", rc);
	mutex_unlock(&the_spk_chip->spk_mutex);

	return rc;
}
/*
 * Read one SPK register at offset @addr from the chip base.
 * Returns the register value (0..255) on success or a negative errno.
 *
 * Fixed: the old code stuffed the negative error code into the u8
 * `val` before returning it, truncating it to a positive byte — so
 * callers' `if (val < 0)` error checks could never trigger. Errors are
 * now returned directly.
 */
static int pm8xxx_spk_read(u16 addr)
{
	int rc;
	u8 val = 0;

	mutex_lock(&the_spk_chip->spk_mutex);
	rc = pm8xxx_readb(the_spk_chip->dev->parent,
			the_spk_chip->base + addr, &val);
	mutex_unlock(&the_spk_chip->spk_mutex);

	if (rc) {
		pr_err("pm8xxx_spk_readb() failed: rc=%d\n", rc);
		return rc;
	}

	return val;
}
/*
 * Write @val to the SPK register at offset @addr from the chip base,
 * serialized by the chip mutex. Returns 0 or a negative errno.
 */
static int pm8xxx_spk_write(u16 addr, u8 val)
{
	int rc;

	mutex_lock(&the_spk_chip->spk_mutex);
	rc = pm8xxx_writeb(the_spk_chip->dev->parent,
			the_spk_chip->base + addr, val);
	if (rc != 0)
		pr_err("pm8xxx_writeb() failed: rc=%d\n", rc);
	mutex_unlock(&the_spk_chip->spk_mutex);

	return rc;
}
/*
 * Mute or unmute the speaker via CTL1 bit 2.
 *
 * Fixed: the old read-modify-write was `val |= mute << 2`, which can
 * only ever set the bit — pm8xxx_spk_mute(false) silently did nothing
 * and the speaker could never be unmuted. The bit is now cleared on
 * unmute. `val` is also an int so a (negative) read error can be
 * propagated instead of being folded into the register value.
 *
 * Returns 0 on success or a negative errno.
 */
int pm8xxx_spk_mute(bool mute)
{
	int val;

	if (spk_defined() == false) {
		pr_err("Invalid spk handle or no spk_chip\n");
		return -ENODEV;
	}

	val = pm8xxx_spk_read(PM8XXX_SPK_CTL1_REG_OFF);
	if (val < 0)
		return val;

	if (mute)
		val |= 1 << 2;
	else
		val &= ~(1 << 2);

	return pm8xxx_spk_write(PM8XXX_SPK_CTL1_REG_OFF, val);
}
EXPORT_SYMBOL_GPL(pm8xxx_spk_mute);
/*
 * Program the speaker gain into the upper nibble of CTL1, preserving
 * the lower control bits, then (on success) enable the 6 dB boost and
 * select the 1.2 V LDO in test-register bank 0.
 * Returns 0 on success, -ENODEV if the chip is absent, or the CTL1
 * write error.
 */
int pm8xxx_spk_gain(u8 gain)
{
	int rc;
	u8 ctl;

	if (spk_defined() == false) {
		pr_err("Invalid spk handle or no spk_chip\n");
		return -ENODEV;
	}

	ctl = pm8xxx_spk_read(PM8XXX_SPK_CTL1_REG_OFF);
	ctl = (ctl & 0xF) | (gain << 4);

	rc = pm8xxx_spk_write(PM8XXX_SPK_CTL1_REG_OFF, ctl);
	if (rc == 0) {
		pm8xxx_spk_bank_write(the_spk_chip->base
			+ PM8XXX_SPK_TEST_REG_1_OFF,
			0, BOOST_6DB_GAIN_EN_MASK | VSEL_LD0_1P2);
	}

	return rc;
}
/*
 * Enable or disable the speaker amplifier via CTL1 bit 3, then (on
 * success) set the PWM enable bits in test-register bank 6.
 * Returns 0 on success or a negative errno.
 */
int pm8xxx_spk_enable(int enable)
{
	int ctl;
	int rc;
	u16 test_reg;

	if (spk_defined() == false) {
		pr_err("Invalid spk handle or no spk_chip\n");
		return -ENODEV;
	}

	test_reg = the_spk_chip->base + PM8XXX_SPK_TEST_REG_1_OFF;

	ctl = pm8xxx_spk_read(PM8XXX_SPK_CTL1_REG_OFF);
	if (ctl < 0)
		return ctl;

	if (enable)
		ctl |= 1 << 3;
	else
		ctl &= ~(1 << 3);

	rc = pm8xxx_spk_write(PM8XXX_SPK_CTL1_REG_OFF, ctl);
	if (rc == 0)
		rc = pm8xxx_spk_bank_write(test_reg, 6, PWM_EN_MASK);

	return rc;
}
/*
 * Apply the default speaker configuration: clear the PWM enable bits
 * in test-register bank 6, then program the default gain.
 * Returns 0 on success or a negative errno.
 */
static int pm8xxx_spk_config(void)
{
	int rc;
	u16 test_reg;

	if (spk_defined() == false) {
		pr_err("Invalid spk handle or no spk_chip\n");
		return -ENODEV;
	}

	test_reg = the_spk_chip->base + PM8XXX_SPK_TEST_REG_1_OFF;

	/* PWM_EN_MASK & 0 == 0: all PWM enable bits cleared */
	rc = pm8xxx_spk_bank_write(test_reg, 6, PWM_EN_MASK & 0);
	if (rc == 0)
		rc = pm8xxx_spk_gain(PM8XXX_SPK_GAIN);

	return rc;
}
/*
 * Probe: allocate the singleton chip state, verify the PMIC version
 * (only PM8038 is supported), latch platform data and the register
 * resource, apply the board-specified CTL1..CTL4 noise-gate/mute
 * configuration and finish with the default gain/PWM setup.
 *
 * Fixes:
 *  - the failure log concatenated "failed." and "Audio" with no
 *    separating space;
 *  - the error path freed the_spk_chip but left the global dangling,
 *    so the exported pm8xxx_spk_* API (which gates on spk_defined())
 *    could dereference freed memory after a failed probe. The pointer
 *    is now reset to NULL.
 */
static int __devinit pm8xxx_spk_probe(struct platform_device *pdev)
{
	const struct pm8xxx_spk_platform_data *pdata = pdev->dev.platform_data;
	int ret = 0;
	u8 value = 0;

	if (!pdata) {
		pr_err("missing platform data\n");
		return -EINVAL;
	}

	the_spk_chip = kzalloc(sizeof(struct pm8xxx_spk_chip), GFP_KERNEL);
	if (the_spk_chip == NULL) {
		pr_err("kzalloc() failed.\n");
		return -ENOMEM;
	}

	mutex_init(&the_spk_chip->spk_mutex);

	the_spk_chip->dev = &pdev->dev;
	the_spk_chip->version = pm8xxx_get_version(the_spk_chip->dev->parent);
	switch (pm8xxx_get_version(the_spk_chip->dev->parent)) {
	case PM8XXX_VERSION_8038:
		break;
	default:
		ret = -ENODEV;
		goto err_handle;
	}

	memcpy(&(the_spk_chip->pdata), pdata,
		sizeof(struct pm8xxx_spk_platform_data));

	the_spk_chip->base = pdev->resource[0].start;
	the_spk_chip->end = pdev->resource[0].end;

	if (the_spk_chip->pdata.spk_add_enable) {
		int val;
		val = pm8xxx_spk_read(PM8XXX_SPK_CTL1_REG_OFF);
		if (val < 0) {
			ret = val;
			goto err_handle;
		}
		val |= (the_spk_chip->pdata.spk_add_enable & PM8XXX_ADD_EN);
		ret = pm8xxx_spk_write(PM8XXX_SPK_CTL1_REG_OFF, val);
		if (ret < 0)
			goto err_handle;
	}

	/* Noise gate threshold / preamp bias from platform data */
	value = ((the_spk_chip->pdata.cd_ng_threshold << 5) |
		the_spk_chip->pdata.cd_nf_preamp_bias << 3);
	pr_debug("Setting SPK_CTL2_REG = %02x\n", value);
	pm8xxx_spk_write(PM8XXX_SPK_CTL2_REG_OFF, value);

	/* Noise gate hold / max attenuation / noise mute */
	value = ((the_spk_chip->pdata.cd_ng_hold << 5) |
		(the_spk_chip->pdata.cd_ng_max_atten << 1) |
		the_spk_chip->pdata.noise_mute);
	pr_debug("Setting SPK_CTL3_REG = %02x\n", value);
	pm8xxx_spk_write(PM8XXX_SPK_CTL3_REG_OFF, value);

	/* Noise gate decay/attack rates and delay */
	value = ((the_spk_chip->pdata.cd_ng_decay_rate << 5) |
		(the_spk_chip->pdata.cd_ng_attack_rate << 3) |
		the_spk_chip->pdata.cd_delay << 2);
	pr_debug("Setting SPK_CTL4_REG = %02x\n", value);
	pm8xxx_spk_write(PM8XXX_SPK_CTL4_REG_OFF, value);

	return pm8xxx_spk_config();
err_handle:
	pr_err("pm8xxx_spk_probe failed. "
			"Audio unavailable on speaker.\n");
	mutex_destroy(&the_spk_chip->spk_mutex);
	kfree(the_spk_chip);
	the_spk_chip = NULL;
	return ret;
}
/* Remove: release the singleton chip state. */
static int __devexit pm8xxx_spk_remove(struct platform_device *pdev)
{
	if (!spk_defined()) {
		pr_err("Invalid spk handle or no spk_chip\n");
		return -ENODEV;
	}

	mutex_destroy(&the_spk_chip->spk_mutex);
	kfree(the_spk_chip);

	return 0;
}
/* Platform driver binding by name; matched against the PM8xxx MFD cell. */
static struct platform_driver pm8xxx_spk_driver = {
	.probe		= pm8xxx_spk_probe,
	.remove		= __devexit_p(pm8xxx_spk_remove),
	.driver		= {
		.name	= PM8XXX_SPK_DEV_NAME,
		.owner	= THIS_MODULE,
	},
};
/* Register the platform driver early (subsystem init time) so audio
 * consumers can rely on it. */
static int __init pm8xxx_spk_init(void)
{
	return platform_driver_register(&pm8xxx_spk_driver);
}
subsys_initcall(pm8xxx_spk_init);
/* Unregister the platform driver on module unload. */
static void __exit pm8xxx_spk_exit(void)
{
	platform_driver_unregister(&pm8xxx_spk_driver);
}
module_exit(pm8xxx_spk_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PM8XXX SPK driver");
MODULE_ALIAS("platform:" PM8XXX_SPK_DEV_NAME);
| gpl-2.0 |
mgaleae/android_kernel_samsung_smdk4412_utouch | drivers/mtd/chips/cfi_cmdset_0002.c | 2460 | 56645 | /*
* Common Flash Interface support:
* AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
*
* Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
* Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
* Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
*
* 2_by_8 routines added by Simon Munton
*
* 4_by_16 work by Carolyn J. Smith
*
* XIP support hooks by Vitaly Wool (based on code for Intel flash
* by Nicolas Pitre)
*
* 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
*
* Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
*
* This code is GPL
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0
#define MAX_WORD_RETRIES 3
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static void cfi_amdstd_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
/*
 * Chip-driver registration record for the AMD/Fujitsu command set.
 * probe is NULL: this driver is only entered through the generic
 * CFI/JEDEC probe paths, never attached directly.
 */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe	= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name	= "cfi_cmdset_0002",
	.module	= THIS_MODULE
};
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
/*
 * Dump the AMD/Fujitsu primary extended query table in human-readable
 * form.  Pure debug aid, compiled out unless DEBUG_CFI_FEATURES is
 * defined above.
 */
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	/* Guard table lookups against out-of-range values from the chip. */
	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk(" Block protection: Not supported\n");
	else
		printk(" Block protection: %d sectors per group\n", extp->BlkProt);

	printk(" Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk(" Page mode: Not supported\n");
	else
		printk(" Page mode: %d word page\n", extp->PageMode << 2);

	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
/*
 * Chips whose extended query table is older than version 1.1 have an
 * untrustworthy TopBottom (boot-block location) field.  Deduce the
 * boot-block location from the JEDEC device ID instead and patch it
 * into the extended table so later code can rely on it.
 */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	/* '1','1' packed as 0x3131: only versions below 1.1 need fixing. */
	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
/*
 * Switch the MTD write op to the buffered-write implementation when the
 * CFI query table advertises a buffer-write timeout (i.e. the chip
 * supports write buffers at all).
 */
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (!cfi->cfiq->BufWriteTimeoutTyp)
		return;

	DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
	mtd->write = cfi_amdstd_write_buffers;
}
/* Atmel chips don't use the same PRI format as AMD chips */
/*
 * Rebuild the extended query table for Atmel parts: preserve the first
 * 5 bytes (signature + version), zero the rest, then translate Atmel's
 * feature bits into the AMD-format fields the rest of this driver reads.
 */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	/* Snapshot the Atmel layout before wiping everything past byte 5. */
	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
/*
 * Route the OTP/protection-register read ops through the SecSi
 * (secured silicon) read path on chips that have a SecSi area.
 */
static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
/*
 * If the device exposes exactly one erase region whose block count
 * field is zero (a single block spanning the whole device), use the
 * faster whole-chip erase command instead of block-by-block erase.
 */
static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->cfiq->NumEraseRegions != 1)
		return;
	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) != 0)
		return;

	mtd->erase = cfi_amdstd_erase_chip;
}
/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	/* Wire in Atmel-specific lock/unlock ops and flag that the chip
	 * powers up locked so the MTD core knows to unlock before writes. */
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}
/*
 * Collapse the bogus dual erase-region report of old SST parts down to
 * the single (sector_erase-size) region.  See comment below.
 */
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}
/*
 * SST39VF (rev A) fixup: drop the duplicate erase region and switch to
 * SST's non-standard unlock cycle addresses (0x5555/0x2AAA).
 */
static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
	fixup_old_sst_eraseregion(mtd);
}
/*
 * SST39VF rev-B fixup: single erase region, short unlock addresses
 * (0x555/0x2AA) and the rev-B sector erase command (0x50).
 */
static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;
	cfi->sector_erase_cmd = CMD(0x50);
	fixup_old_sst_eraseregion(mtd);
}
/*
 * SST38VF640x: apply the rev-B fixups, then correct the erase-region
 * descriptor the chip mis-reports (sector size 64KiB -> 8KiB).
 */
static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}
/*
 * S29GL064N: some parts report 64 sectors (count field 0x003f) where
 * 128 are present; set the extra count bit to double it.
 */
static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) != 0x003f)
		return;

	cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
	pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
}
/*
 * S29GL032N: some parts report 127 sectors (count field 0x007e) in
 * region 1 where only 63 exist; clear the excess count bit to halve it.
 */
static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) != 0x007e)
		return;

	cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
	pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
}
/* Used to fix CFI-Tables of chips without Extended Query Tables */
/* Each entry: { manufacturer id, device id, fixup function }; a table
 * is terminated by the { 0, 0, NULL } sentinel and walked by cfi_fixup(). */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

/* Device-specific fixups applied to chips probed in CFI mode. */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};

/* Fixups applied to chips probed in JEDEC (non-CFI) mode. */
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

/* Generic fixups applied regardless of probe mode. */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};
/*
 * Normalize bogus extended-query version numbers reported by some
 * Samsung and SST parts so the version check in cfi_cmdset_0002()
 * accepts them.  Versions are stored as ASCII digits ('1', '0', ...).
 */
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}
/*
 * Probe-time constructor for chips speaking the AMD/Fujitsu standard
 * command set (CFI command set 0x0002).  Allocates and populates the
 * mtd_info, reads the primary extended query table (CFI mode), applies
 * the fixup tables, seeds per-chip timing from the CFI query data and
 * hands off to cfi_amdstd_setup().  Returns NULL on any failure.
 *
 * @map:     the memory map the chip(s) live behind
 * @primary: nonzero to read the primary (P_ADR) extension table,
 *           zero for the alternate (A_ADR) one
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase = cfi_amdstd_erase_varsize;
	mtd->write = cfi_amdstd_write_words;
	mtd->read = cfi_amdstd_read;
	mtd->sync = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume = cfi_amdstd_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
		__func__, mtd->writebufsize);

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			/* NOTE(review): the MajorVersion == '1' conjunct below is
			 * redundant (short-circuit of the != '1' test already
			 * guarantees it) but harmless. */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			/* Top-boot parts list their erase regions in the
			 * opposite order to how this driver expects them;
			 * reverse the array in place. */
			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		/* Without unlock addresses (no extp and no fixup matched)
		 * we cannot drive the chip at all. */
		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* Seed per-chip typical timings (powers of two per CFI) and the
	 * wait queues used for chip-state arbitration. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
/* Command sets 0x0006 and 0x0701 are handled identically to 0x0002;
 * export them as aliases of the same constructor. */
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
/*
 * Second-stage setup: translate the CFI erase-region descriptors into
 * the mtd_erase_region_info array (replicated per interleaved chip),
 * sanity-check that the regions cover the whole device, and register
 * the reboot notifier.  Frees everything and returns NULL on failure.
 */
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo packs size (upper 16 bits, units of 256
		 * bytes) and block count minus one (lower 16 bits). */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		/* mtd->erasesize ends up as the largest region's block size. */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
/*
 * Return true if the chip is ready.
 *
 * While an AMD-style flash is programming or erasing, successive reads
 * of the same address return toggling status bits; once the operation
 * completes the device is back in (or equivalent to) read mode and two
 * consecutive reads match.  So "two identical reads" == ready.
 *
 * Note that anything more complicated than checking if no bits are
 * toggling (including checking DQ5 for an error status) is tricky to
 * get working correctly and is therefore not done (particularly with
 * interleaved chips as each chip must be checked independently of the
 * others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word first_read, second_read;

	first_read = map_read(map, addr);
	second_read = map_read(map, addr);

	return map_word_equal(map, first_read, second_read);
}
/*
 * Return true if the chip is ready and holds the expected value.
 *
 * Like chip_ready(), two consecutive reads must match (no status bits
 * toggling); additionally the stable value must equal @expected, which
 * catches operations that completed with the wrong data.
 *
 * Anything fancier (e.g. DQ5 error polling) is deliberately avoided —
 * it is hard to get right, especially with interleaved chips where
 * each chip would need independent checking.
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word first_read = map_read(map, addr);
	map_word second_read = map_read(map, addr);

	if (!map_word_equal(map, first_read, second_read))
		return 0;

	return map_word_equal(map, second_read, expected);
}
/*
 * Acquire a chip for an operation of type @mode (FL_READY, FL_WRITING,
 * FL_POINT, ...).  Must be called with chip->mutex held; may drop and
 * re-take it while waiting.  Returns 0 with the chip usable, or a
 * negative errno.  If an in-progress erase had to be suspended, the
 * caller must balance with put_chip() to resume it.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		/* Busy-wait (with the mutex dropped) until the chip stops
		 * toggling, then treat it like FL_READY below. */
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* fall through */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only suspend the erase if the chip advertises erase-suspend
		 * and the requested mode can run during one. */
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		/* Chip is busy with something we cannot interrupt: sleep on
		 * the chip's wait queue until woken, then start over. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
/*
 * Release a chip acquired with get_chip(): resume a suspended erase,
 * restore an XIP state, or drop Vpp, then wake any waiters.  Must be
 * called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		/* get_chip() suspended an erase; issue Erase-Resume. */
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt what so ever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

/* Enter a no-XIP section: fault in the mapping, then mask interrupts. */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

/* Leave a no-XIP section: force the chip back to array (read) mode if
 * needed, prime the instruction prefetch, then unmask interrupts. */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr); /* 0xF0 = AMD reset/read-array */
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remaining
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80); /* DQ7 set => operation complete */
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr); /* Erase-Suspend */
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			/* NOTE(review): bit 0x40 here appears to distinguish a
			 * genuinely suspended erase from one that already
			 * finished — confirm against the AMD status-bit spec. */
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr); /* back to array mode */
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
/* XIP build: all delay-style waiting goes through xip_udelay(). */
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

/* Non-XIP build: the XIP hooks are no-ops and delays simply drop the
 * chip mutex around a busy-wait so other users can make progress. */
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
/*
 * Read @len bytes at chip-relative offset @adr from a single chip into
 * @buf.  Acquires the chip, forces it into read-array mode if needed,
 * copies the data and releases the chip.  Returns 0 or a negative errno
 * from get_chip().
 */
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		/* 0xF0 = AMD reset command: return to read-array mode. */
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * MTD read op: split the request at chip boundaries (chipshift bytes
 * per chip) and read each piece via do_read_onechip().  *retlen is
 * updated with the bytes actually read even on partial failure.
 */
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* Clamp this piece to the end of the current chip. */
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
/*
 * Read from one chip's SecSi (secured silicon / OTP) area.  Waits for
 * the chip to be idle, issues the Enter-SecSi command sequence
 * (AA/55/88), copies the data, then issues Exit-SecSi (AA/55/90/00).
 * The command order is part of the AMD command protocol — do not
 * reorder.  Always returns 0.
 */
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		/* Busy: sleep on the chip's queue and retry from scratch. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	/* Enter SecSi sector region */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	/* Exit SecSi sector region, back to normal array reads */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
/*
 * MTD OTP read op: each chip contributes 8 SecSi bytes, so split the
 * request on 8-byte boundaries and dispatch each piece to
 * do_read_secsi_onechip().
 */
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* Clamp this piece to the current chip's 8-byte window. */
		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
/*
 * Program a single bus-width word at @adr (chip-relative on entry)
 * using the AMD unlock (AA/55) + program (A0) command sequence, then
 * poll until the chip reports completion and verify the result with
 * chip_good(). Skips the program entirely if the word already holds
 * the requested value. Retries up to MAX_WORD_RETRIES times; returns
 * 0 on success or -EIO. Takes chip->mutex itself.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
/*
 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
 * have a max write time of a few hundreds usec). However, we should
 * use the maximum timeout value given by the chip at probe time
 * instead. Unfortunately, struct flchip does have a field for
 * maximum timeout, only for typical which can be far too short
 * depending of the conditions. The ' + 1' is to avoid having a
 * timeout of 0 jiffies if HZ is smaller than 1000.
 */
unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
int ret = 0;
map_word oldd;
int retry_cnt = 0;
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0] );
/*
 * Check for a NOP for the case when the datum to write is already
 * present - it saves time and works around buggy chips that corrupt
 * data at other locations when 0xff is written to a location that
 * already contains 0xff.
 */
oldd = map_read(map, adr);
if (map_word_equal(map, oldd, datum)) {
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
__func__);
goto op_done;
}
XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
ENABLE_VPP(map);
xip_disable(map, chip, adr);
retry:
/* AMD word-program command sequence: AA/55 unlock, A0, then the data */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
map_write(map, datum, adr);
chip->state = FL_WRITING;
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map_bankwidth(map),
chip->word_write_time);
/* See comment above for timeout value. */
timeo = jiffies + uWriteTimeout;
for (;;) {
if (chip->state != FL_WRITING) {
/* Someone's suspended the write. Sleep */
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
timeo = jiffies + (HZ / 2); /* FIXME */
mutex_lock(&chip->mutex);
continue;
}
if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
xip_disable(map, chip, adr);
break;
}
if (chip_ready(map, adr))
break;
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
}
/* Did we succeed? */
if (!chip_good(map, adr, datum)) {
/* reset on all failures. */
map_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
if (++retry_cnt <= MAX_WORD_RETRIES)
goto retry;
ret = -EIO;
}
xip_enable(map, chip, adr);
op_done:
chip->state = FL_READY;
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
/*
 * MTD write entry point, one bus-width word at a time through
 * do_write_oneword(). An unaligned head and tail are handled by
 * read-modify-write: the existing flash contents are read back (after
 * waiting for the chip to reach FL_READY) and merged with the caller's
 * bytes via map_word_load_partial(). Crossing into the next chip
 * resets the in-chip offset to 0.
 */
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ret = 0;
int chipnum;
unsigned long ofs, chipstart;
DECLARE_WAITQUEUE(wait, current);
*retlen = 0;
if (!len)
return 0;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
chipstart = cfi->chips[chipnum].start;
/* If it's not bus-aligned, do the first byte write */
if (ofs & (map_bankwidth(map)-1)) {
unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
int i = ofs - bus_ofs;
int n = 0;
map_word tmp_buf;
retry:
/* Sleep until the chip is idle so we can safely read the old word */
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
goto retry;
}
/* Load 'tmp_buf' with old contents of flash */
tmp_buf = map_read(map, bus_ofs+chipstart);
mutex_unlock(&cfi->chips[chipnum].mutex);
/* Number of bytes to copy from buffer */
n = min_t(int, len, map_bankwidth(map)-i);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
ret = do_write_oneword(map, &cfi->chips[chipnum],
bus_ofs, tmp_buf);
if (ret)
return ret;
ofs += n;
buf += n;
(*retlen) += n;
len -= n;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
/* We are now aligned, write as much as possible */
while(len >= map_bankwidth(map)) {
map_word datum;
datum = map_word_load(map, buf);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, datum);
if (ret)
return ret;
ofs += map_bankwidth(map);
buf += map_bankwidth(map);
(*retlen) += map_bankwidth(map);
len -= map_bankwidth(map);
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
chipstart = cfi->chips[chipnum].start;
}
}
/* Write the trailing bytes if any */
if (len & (map_bankwidth(map)-1)) {
map_word tmp_buf;
retry1:
/* Same idle-wait + read-modify-write dance as for the head */
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
mutex_unlock(&cfi->chips[chipnum].mutex);
schedule();
remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
goto retry1;
}
tmp_buf = map_read(map, ofs + chipstart);
mutex_unlock(&cfi->chips[chipnum].mutex);
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
ret = do_write_oneword(map, &cfi->chips[chipnum],
ofs, tmp_buf);
if (ret)
return ret;
(*retlen) += len;
}
return 0;
}
/*
* FIXME: interleaved mode not tested, and probably not supported!
*/
/*
 * Program up to one write-buffer's worth of data (@len bytes, a
 * multiple of the bus width, never crossing a write-buffer boundary)
 * starting at @adr (chip-relative on entry), using the AMD buffered
 * program sequence: AA/55 unlock, 0x25 load, word count, data words,
 * then 0x29 confirm. Takes chip->mutex itself.
 *
 * Returns 0 on success or -EIO on timeout / verify failure.
 *
 * Fix: completion is now polled with chip_good() (comparing the last
 * word written) instead of chip_ready() (two identical reads).
 * chip_ready() can return true even when the buffered program aborted,
 * reporting success on a failed write and silently corrupting data;
 * this mirrors the upstream kernel fix for this driver.
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeo. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* AA/55 unlock, then Write Buffer Load (0x25) at the sector */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x25), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come (bus-width words, minus one) */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);

	/* Fill the chip's internal write buffer */
	z = 0;
	while (z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);
		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	/* 'datum' now holds the LAST word written; poll against it below */
	z -= map_bankwidth(map);
	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * Compare actual data, not just toggle-bit readiness: a
		 * failed buffered program can look "ready" while the
		 * target still holds wrong data.
		 */
		if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
			break;

		if (chip_good(map, adr, datum)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
int ret = 0;
int chipnum;
unsigned long ofs;
*retlen = 0;
if (!len)
return 0;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
/* If it's not bus-aligned, do the first word write */
if (ofs & (map_bankwidth(map)-1)) {
size_t local_len = (-ofs)&(map_bankwidth(map)-1);
if (local_len > len)
local_len = len;
ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
local_len, retlen, buf);
if (ret)
return ret;
ofs += local_len;
buf += local_len;
len -= local_len;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
/* Write buffer is worth it only if more than one word to write... */
while (len >= map_bankwidth(map) * 2) {
/* We must not cross write block boundaries */
int size = wbufsize - (ofs & (wbufsize-1));
if (size > len)
size = len;
if (size % map_bankwidth(map))
size -= size % map_bankwidth(map);
ret = do_write_buffer(map, &cfi->chips[chipnum],
ofs, buf, size);
if (ret)
return ret;
ofs += size;
buf += size;
(*retlen) += size;
len -= size;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
if (len) {
size_t retlen_dregs = 0;
ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
len, &retlen_dregs, buf);
*retlen += retlen_dregs;
return ret;
}
return 0;
}
/*
* Handle devices with one erase region, that only implement
* the chip erase command.
*/
/*
 * Erase an entire chip with the AMD chip-erase command sequence
 * (AA/55/80/AA/55/10). Polls for up to 20 seconds, honouring erase
 * suspend/resume, then verifies completion with chip_good() against an
 * all-0xFF word. Returns 0 on success or -EIO. Takes chip->mutex.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
adr = cfi->addr_unlock1;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
__func__, chip->start );
XIP_INVAL_CACHED_RANGE(map, adr, map->size);
ENABLE_VPP(map);
xip_disable(map, chip, adr);
/* AMD chip-erase sequence: AA/55/80 prefix, AA/55 unlock, 0x10 erase */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map->size,
chip->erase_time*500);
timeo = jiffies + (HZ*20);
for (;;) {
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
/* This erase was suspended and resumed.
Adjust the timeout */
timeo = jiffies + (HZ*20); /* FIXME */
chip->erase_suspended = 0;
}
if (chip_ready(map, adr))
break;
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
__func__ );
break;
}
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
if (!chip_good(map, adr, map_word_ff(map))) {
/* reset on all failures. */
map_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
ret = -EIO;
}
chip->state = FL_READY;
xip_enable(map, chip, adr);
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
/*
 * Erase a single sector/block at @adr (chip-relative on entry) using
 * the AA/55/80/AA/55 prefix followed by the chip's sector-erase
 * command written to the sector address. Polls for up to 20 seconds,
 * honouring erase suspend/resume, then verifies the erased word with
 * chip_good(). Returns 0 on success or -EIO. Takes chip->mutex.
 */
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
adr += chip->start;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_ERASING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
}
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
__func__, adr );
XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
xip_disable(map, chip, adr);
/* Sector-erase sequence: AA/55/80/AA/55, then erase cmd at the sector */
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
map_write(map, cfi->sector_erase_cmd, adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
INVALIDATE_CACHE_UDELAY(map, chip,
adr, len,
chip->erase_time*500);
timeo = jiffies + (HZ*20);
for (;;) {
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
continue;
}
if (chip->erase_suspended) {
/* This erase was suspended and resumed.
Adjust the timeout */
timeo = jiffies + (HZ*20); /* FIXME */
chip->erase_suspended = 0;
}
if (chip_ready(map, adr)) {
xip_enable(map, chip, adr);
break;
}
if (time_after(jiffies, timeo)) {
xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n",
__func__ );
break;
}
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
if (!chip_good(map, adr, map_word_ff(map))) {
/* reset on all failures. */
map_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
ret = -EIO;
}
chip->state = FL_READY;
put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
}
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
unsigned long ofs, len;
int ret;
ofs = instr->addr;
len = instr->len;
ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
if (ret)
return ret;
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
return 0;
}
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ret = 0;
if (instr->addr != 0)
return -EINVAL;
if (instr->len != mtd->size)
return -EINVAL;
ret = do_erase_chip(map, &cfi->chips[0]);
if (ret)
return ret;
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
return 0;
}
/*
 * Lock (write-protect) one sector on Atmel chips: AA/55/80/AA/55
 * command prefix, then 0x40 written to the sector address.
 * Per-sector callback for cfi_varsize_frob().
 */
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
if (ret)
goto out_unlock;
chip->state = FL_LOCKING;
DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
__func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
cfi->device_type, NULL);
/* 0x40 at the sector address performs the lock */
map_write(map, CMD(0x40), chip->start + adr);
chip->state = FL_READY;
put_chip(map, chip, adr + chip->start);
ret = 0;
out_unlock:
mutex_unlock(&chip->mutex);
return ret;
}
/*
 * Unlock one sector on Atmel chips: a single AA command followed by
 * 0x70 written to the sector address. Per-sector callback for
 * cfi_varsize_frob().
 */
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
int ret;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
if (ret)
goto out_unlock;
chip->state = FL_UNLOCKING;
DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
__func__, adr, len);
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
/*
 * NOTE(review): written to 'adr' without adding chip->start, unlike
 * the 0x40 write in do_atmel_lock() -- verify this is intentional.
 */
map_write(map, CMD(0x70), adr);
chip->state = FL_READY;
put_chip(map, chip, adr + chip->start);
ret = 0;
out_unlock:
mutex_unlock(&chip->mutex);
return ret;
}
/* MTD lock entry point: apply do_atmel_lock() across [ofs, ofs+len). */
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}
/* MTD unlock entry point: apply do_atmel_unlock() across [ofs, ofs+len). */
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
/*
 * Wait for every chip to become idle and hold it in FL_SYNCING, then
 * release them all, restoring the previous states. 'ret' is never set
 * non-zero in this function, so the first loop visits every chip.
 */
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
int ret = 0;
DECLARE_WAITQUEUE(wait, current);
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
retry:
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
/* fall through */
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
default:
/* Not an idle state */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
goto retry;
}
}
/* Unlock the chips again */
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
/*
 * MTD suspend handler: park every idle chip in FL_PM_SUSPENDED.
 * If any chip is busy, return -EAGAIN and roll back the chips that
 * were already suspended.
 */
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
int ret = 0;
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
switch(chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_PM_SUSPENDED;
/* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
/* fall through */
case FL_PM_SUSPENDED:
break;
default:
/* Chip is busy; abort and roll back below */
ret = -EAGAIN;
break;
}
mutex_unlock(&chip->mutex);
}
/* Unlock the chips again */
if (ret) {
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
mutex_lock(&chip->mutex);
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
mutex_unlock(&chip->mutex);
}
}
return ret;
}
/*
 * MTD resume handler: wake every chip that cfi_amdstd_suspend() parked
 * in FL_PM_SUSPENDED. Each chip gets a reset command (0xF0) to force
 * it back into read-array mode before any waiters are released.
 */
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct flchip *chip;
	int i;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		if (chip->state != FL_PM_SUSPENDED) {
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
			mutex_unlock(&chip->mutex);
			continue;
		}

		chip->state = FL_READY;
		map_write(map, CMD(0xF0), chip->start);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
	}
}
/*
* Ensure that the flash device is put back into read array mode before
* unloading the driver or rebooting. On some systems, rebooting while
* the flash is in query/program/erase mode will prevent the CPU from
* fetching the bootloader code, requiring a hard reset or power cycle.
*/
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct flchip *chip;
	int i, err;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		err = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!err) {
			/* 0xF0 = AMD reset: back to read-array mode */
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}
/*
 * Reboot-notifier callback: put the flash back into read-array mode so
 * the CPU can fetch bootloader code after the reset (see comment above
 * cfi_amdstd_reset()).
 */
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
void *v)
{
struct mtd_info *mtd;
mtd = container_of(nb, struct mtd_info, reboot_notifier);
cfi_amdstd_reset(mtd);
return NOTIFY_DONE;
}
/*
 * Driver teardown: reset the chips to read-array mode, drop the reboot
 * notifier, and free all driver-private allocations.
 */
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
cfi_amdstd_reset(mtd);
unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
kfree(cfi->cfiq);
kfree(cfi);
kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");
| gpl-2.0 |
garwynn/D710BST_GC01_Kernel | arch/arm/mach-netx/nxdb500.c | 2972 | 4646 | /*
* arch/arm/mach-netx/nxdb500.c
*
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mtd/plat-ram.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/netx-regs.h>
#include <mach/eth.h>
#include "generic.h"
#include "fb.h"
/* 240x320 portrait QVGA TFT panel mode for the on-chip CLCD controller */
static struct clcd_panel qvga = {
.mode = {
.name = "QVGA",
.refresh = 60,
.xres = 240,
.yres = 320,
.pixclock = 187617,
.left_margin = 6,
.right_margin = 26,
.upper_margin = 0,
.lower_margin = 6,
.hsync_len = 6,
.vsync_len = 1,
.sync = 0,
.vmode = FB_VMODE_NONINTERLACED,
},
.width = -1, /* physical size unknown */
.height = -1,
.tim2 = 16,
.cntl = CNTL_LCDTFT | CNTL_BGR,
.bpp = 16,
.grayscale = 0,
};
/*
 * CLCD mode-validation hook: force a 5-bit green channel before
 * delegating to the generic clcdfb_check().
 */
static inline int nxdb500_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
{
var->green.length = 5;
var->green.msb_right = 0;
return clcdfb_check(fb, var);
}
/*
 * Per-board CLCD setup: force the 5-bit green field, enable ASIC
 * control of the display pins, drive PIO0 high as an output
 * (presumably a panel-enable line -- TODO confirm against the board
 * schematics), then hand off to the generic netx_clcd_setup().
 */
static int nxdb500_clcd_setup(struct clcd_fb *fb)
{
unsigned int val;
fb->fb.var.green.length = 5;
fb->fb.var.green.msb_right = 0;
/* enable asic control */
val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
writel(val, NETX_SYSTEM_IOC_ACCESS_KEY); /* read-then-write-back unlocks the IOC registers */
writel(3, NETX_SYSTEM_IOC_CR);
/* drive PIO0 high and switch it to output */
val = readl(NETX_PIO_OUTPIO);
writel(val | 1, NETX_PIO_OUTPIO);
val = readl(NETX_PIO_OEPIO);
writel(val | 1, NETX_PIO_OEPIO);
return netx_clcd_setup(fb);
}
/* CLCD board operations: board-specific check/setup plus generic netX helpers */
static struct clcd_board clcd_data = {
.name = "netX",
.check = nxdb500_check,
.decode = clcdfb_decode,
.enable = netx_clcd_enable,
.setup = nxdb500_clcd_setup,
.mmap = netx_clcd_mmap,
.remove = netx_clcd_remove,
};
/* Two netx-eth interfaces, one per xC communication unit (0 and 1) */
static struct netxeth_platform_data eth0_platform_data = {
.xcno = 0,
};
static struct platform_device netx_eth0_device = {
.name = "netx-eth",
.id = 0,
.num_resources = 0,
.resource = NULL,
.dev = {
.platform_data = &eth0_platform_data,
}
};
static struct netxeth_platform_data eth1_platform_data = {
.xcno = 1,
};
static struct platform_device netx_eth1_device = {
.name = "netx-eth",
.id = 1,
.num_resources = 0,
.resource = NULL,
.dev = {
.platform_data = &eth1_platform_data,
}
};
/* Register windows (0x40 bytes each) and IRQs for the three on-chip UARTs */
static struct resource netx_uart0_resources[] = {
[0] = {
.start = 0x00100A00,
.end = 0x00100A3F,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (NETX_IRQ_UART0),
.end = (NETX_IRQ_UART0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device netx_uart0_device = {
.name = "netx-uart",
.id = 0,
.num_resources = ARRAY_SIZE(netx_uart0_resources),
.resource = netx_uart0_resources,
};
static struct resource netx_uart1_resources[] = {
[0] = {
.start = 0x00100A40,
.end = 0x00100A7F,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (NETX_IRQ_UART1),
.end = (NETX_IRQ_UART1),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device netx_uart1_device = {
.name = "netx-uart",
.id = 1,
.num_resources = ARRAY_SIZE(netx_uart1_resources),
.resource = netx_uart1_resources,
};
static struct resource netx_uart2_resources[] = {
[0] = {
.start = 0x00100A80,
.end = 0x00100ABF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (NETX_IRQ_UART2),
.end = (NETX_IRQ_UART2),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device netx_uart2_device = {
.name = "netx-uart",
.id = 2,
.num_resources = ARRAY_SIZE(netx_uart2_resources),
.resource = netx_uart2_resources,
};
/* All platform devices registered at board init */
static struct platform_device *devices[] __initdata = {
&netx_eth0_device,
&netx_eth1_device,
&netx_uart0_device,
&netx_uart1_device,
&netx_uart2_device,
};
/* Board init: set up the framebuffer (QVGA panel) and register devices */
static void __init nxdb500_init(void)
{
netx_fb_init(&clcd_data, &qvga);
platform_add_devices(devices, ARRAY_SIZE(devices));
}
/* Machine descriptor for the Hilscher NXDB500 board */
MACHINE_START(NXDB500, "Hilscher nxdb500")
.boot_params = 0x80000100,
.map_io = netx_map_io,
.init_irq = netx_init_irq,
.timer = &netx_timer,
.init_machine = nxdb500_init,
MACHINE_END
| gpl-2.0 |
TEAM-Gummy/kernel_asus_grouper | drivers/acpi/acpica/evgpeutil.c | 3228 | 11119 | /******************************************************************************
*
* Module Name: evgpeutil - GPE utilities
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpeutil")
/*******************************************************************************
*
* FUNCTION: acpi_ev_walk_gpe_list
*
* PARAMETERS: gpe_walk_callback - Routine called for each GPE block
* Context - Value passed to callback
*
* RETURN: Status
*
* DESCRIPTION: Walk the GPE lists.
*
******************************************************************************/
acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
acpi_status status = AE_OK;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
/* NOTE: acpi_gbl_gpe_lock is held across the whole walk, including
* every callback invocation */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Walk the interrupt level descriptor list */
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
while (gpe_xrupt_info) {
/* Walk all Gpe Blocks attached to this interrupt level */
gpe_block = gpe_xrupt_info->gpe_block_list_head;
while (gpe_block) {
/* One callback per GPE block */
status =
gpe_walk_callback(gpe_xrupt_info, gpe_block,
context);
if (ACPI_FAILURE(status)) {
if (status == AE_CTRL_END) { /* Callback abort */
status = AE_OK;
}
goto unlock_and_exit;
}
gpe_block = gpe_block->next;
}
gpe_xrupt_info = gpe_xrupt_info->next;
}
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_valid_gpe_event
*
* PARAMETERS: gpe_event_info - Info for this GPE
*
* RETURN: TRUE if the gpe_event is valid
*
* DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
* Should be called only when the GPE lists are semaphore locked
* and not subject to change.
*
******************************************************************************/
/*
 * Return TRUE if @gpe_event_info points inside the event_info array of
 * any registered GPE block; FALSE otherwise. Walks the global xrupt /
 * block lists without locking, so callers must guarantee the lists are
 * stable (see banner above).
 */
u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_xrupt_info *xrupt;
	struct acpi_gpe_block_info *block;

	ACPI_FUNCTION_ENTRY();

	/* No need for spin lock since we are not changing any list elements */

	for (xrupt = acpi_gbl_gpe_xrupt_list_head; xrupt; xrupt = xrupt->next) {
		for (block = xrupt->gpe_block_list_head; block;
		     block = block->next) {
			/* Is the pointer within this block's event array? */
			if ((gpe_event_info >= &block->event_info[0]) &&
			    (gpe_event_info <
			     &block->event_info[block->gpe_count])) {
				return (TRUE);
			}
		}
	}

	return (FALSE);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_device
*
* PARAMETERS: GPE_WALK_CALLBACK
*
* RETURN: Status
*
* DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
* block device. NULL if the GPE is one of the FADT-defined GPEs.
*
******************************************************************************/
/*
 * GPE-walk callback: match the index carried in @context (a struct
 * acpi_gpe_device_info) against this block's index range. On a hit,
 * record the block's device node (left NULL for FADT-defined GPEs),
 * set info->status = AE_OK and stop the walk with AE_CTRL_END.
 */
acpi_status
acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
		       struct acpi_gpe_block_info *gpe_block, void *context)
{
	struct acpi_gpe_device_info *info = context;

	/* Advance the running base index past this block's GPEs */

	info->next_block_base_index += gpe_block->gpe_count;

	if (info->index >= info->next_block_base_index) {
		/* Target index lies in a later block; keep walking */
		return (AE_OK);
	}

	/*
	 * The GPE index is within this block. Record the owning node,
	 * leaving it NULL for the FADT-defined GPEs.
	 */
	if (gpe_block->node->type == ACPI_TYPE_DEVICE) {
		info->gpe_device = gpe_block->node;
	}

	info->status = AE_OK;
	return (AE_CTRL_END);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_xrupt_block
*
* PARAMETERS: interrupt_number - Interrupt for a GPE block
*
* RETURN: A GPE interrupt block
*
* DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
* block per unique interrupt level used for GPEs. Should be
* called only when the GPE lists are semaphore locked and not
* subject to change.
*
******************************************************************************/
struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
{
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt;
acpi_status status;
acpi_cpu_flags flags;
ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
/* No need for lock since we are not changing any list elements here */
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt) {
if (next_gpe_xrupt->interrupt_number == interrupt_number) {
/* Existing descriptor for this interrupt level - reuse it */
return_PTR(next_gpe_xrupt);
}
next_gpe_xrupt = next_gpe_xrupt->next;
}
/* Not found, must allocate a new xrupt descriptor */
gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
if (!gpe_xrupt) {
/* Allocation failure is reported as NULL to the caller */
return_PTR(NULL);
}
gpe_xrupt->interrupt_number = interrupt_number;
/* Install new interrupt descriptor with spin lock */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
if (acpi_gbl_gpe_xrupt_list_head) {
/* Append at the tail of the doubly-linked list */
next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
while (next_gpe_xrupt->next) {
next_gpe_xrupt = next_gpe_xrupt->next;
}
next_gpe_xrupt->next = gpe_xrupt;
gpe_xrupt->previous = next_gpe_xrupt;
} else {
/* First entry becomes the list head */
acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
}
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
/* Install new interrupt handler if not SCI_INT */
if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
status = acpi_os_install_interrupt_handler(interrupt_number,
acpi_ev_gpe_xrupt_handler,
gpe_xrupt);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
"Could not install GPE interrupt handler at level 0x%X",
interrupt_number));
return_PTR(NULL);
}
}
return_PTR(gpe_xrupt);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/
acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

	/* We never want to remove the SCI interrupt handler */

	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
		/* Just detach the GPE blocks; the descriptor itself stays */
		gpe_xrupt->gpe_block_list_head = NULL;
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable this interrupt */

	status =
	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
					     acpi_ev_gpe_xrupt_handler);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Unlink the interrupt block with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt->previous) {
		gpe_xrupt->previous->next = gpe_xrupt->next;
	} else {
		/* No previous, update list head */

		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
	}

	if (gpe_xrupt->next) {
		gpe_xrupt->next->previous = gpe_xrupt->previous;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Free the block */

	ACPI_FREE(gpe_xrupt);
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_delete_gpe_handlers
*
* PARAMETERS: gpe_xrupt_info - GPE Interrupt info
* gpe_block - Gpe Block info
*
* RETURN: Status
*
* DESCRIPTION: Delete all Handler objects found in the GPE data structs.
* Used only prior to termination.
*
******************************************************************************/
acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context)
{
struct acpi_gpe_event_info *gpe_event_info;
u32 i;
u32 j;
ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
/* Examine each GPE Register within the block */
for (i = 0; i < gpe_block->register_count; i++) {
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
gpe_event_info = &gpe_block->event_info[((acpi_size) i *
ACPI_GPE_REGISTER_WIDTH)
+ j];
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_HANDLER) {
ACPI_FREE(gpe_event_info->dispatch.handler);
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
}
}
}
return_ACPI_STATUS(AE_OK);
}
| gpl-2.0 |
sultanxda/sultan-kernel-celox | drivers/acpi/acpica/tbfind.c | 3228 | 4828 | /******************************************************************************
*
* Module Name: tbfind - find table
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbfind")
/*******************************************************************************
 *
 * FUNCTION:    acpi_tb_find_table
 *
 * PARAMETERS:  Signature           - String with ACPI table signature
 *              oem_id              - String with the table OEM ID
 *              oem_table_id        - String with the OEM Table ID
 *              table_index         - Where the table index is returned
 *
 * RETURN:      Status and table index
 *
 * DESCRIPTION: Find an ACPI table (in the RSDT/XSDT) that matches the
 *              Signature, OEM ID and OEM Table ID. Returns an index that can
 *              be used to get the table header or entire table. An empty
 *              oem_id or oem_table_id acts as a wildcard for that field.
 *
 ******************************************************************************/
acpi_status
acpi_tb_find_table(char *signature,
		   char *oem_id, char *oem_table_id, u32 *table_index)
{
	u32 i;
	acpi_status status;
	struct acpi_table_header header;

	ACPI_FUNCTION_TRACE(tb_find_table);

	/* Normalize the input strings */

	ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
	ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
	ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
	ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);

	/* Search for the table */

	for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
		if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
				header.signature, ACPI_NAME_SIZE)) {

			/* Not the requested table */

			continue;
		}

		/* Table with matching signature has been found */

		if (!acpi_gbl_root_table_list.tables[i].pointer) {

			/* Table is not currently mapped, map it */

			status =
			    acpi_tb_verify_table(&acpi_gbl_root_table_list.
						 tables[i]);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Still unmapped after verify: skip this entry */

			if (!acpi_gbl_root_table_list.tables[i].pointer) {
				continue;
			}
		}

		/* Check for table match on all IDs (empty ID == wildcard) */

		if (!ACPI_MEMCMP
		    (acpi_gbl_root_table_list.tables[i].pointer->signature,
		     header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
							   ||
							   !ACPI_MEMCMP
							   (acpi_gbl_root_table_list.
							    tables[i].pointer->
							    oem_id,
							    header.oem_id,
							    ACPI_OEM_ID_SIZE))
		    && (!oem_table_id[0]
			|| !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
					pointer->oem_table_id,
					header.oem_table_id,
					ACPI_OEM_TABLE_ID_SIZE))) {
			*table_index = i;

			ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
					  "Found table [%4.4s]\n",
					  header.signature));
			return_ACPI_STATUS(AE_OK);
		}
	}

	return_ACPI_STATUS(AE_NOT_FOUND);
}
| gpl-2.0 |
wangxingchao/s3c6410 | arch/powerpc/mm/mmu_context_hash32.c | 3740 | 3445 | /*
* This file contains the routines for handling the MMU on those
* PowerPC implementations where the MMU substantially follows the
* architecture specification. This includes the 6xx, 7xx, 7xxx,
* 8260, and POWER3 implementations but excludes the 8xx and 4xx.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
/*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
* (virtual segment identifiers) for each context. Although the
* hardware supports 24-bit VSIDs, and thus >1 million contexts,
* we only use 32,768 of them. That is ample, since there can be
* at most around 30,000 tasks in the system anyway, and it means
* that we can use a bitmap to indicate which contexts are in use.
* Using a bitmap means that we entirely avoid all of the problems
* that we used to have when the context number overflowed,
* particularly on SMP systems.
* -- paulus.
*/
#define NO_CONTEXT ((unsigned long) -1)
#define LAST_CONTEXT 32767
#define FIRST_CONTEXT 1
/*
* This function defines the mapping from contexts to VSIDs (virtual
* segment IDs). We use a skew on both the context and the high 4 bits
* of the 32-bit virtual address (the "effective segment ID") in order
* to spread out the entries in the MMU hash table. Note, if this
* function is changed then arch/ppc/mm/hashtable.S will have to be
* changed to correspond.
*
*
* CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
* & 0xffffff)
*/
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
/*
 * Allocate a free MMU context number. Starts scanning at the rotating
 * cursor (next_mmu_context) and claims the first clear bit in the map.
 */
unsigned long __init_new_context(void)
{
	unsigned long id;

	for (id = next_mmu_context; test_and_set_bit(id, context_map);) {
		id = find_next_zero_bit(context_map, LAST_CONTEXT + 1, id);
		if (id > LAST_CONTEXT)
			id = 0;	/* wrap around and keep searching */
	}

	/* Advance the cursor (modulo the context space) for the next caller */
	next_mmu_context = (id + 1) & LAST_CONTEXT;
	return id;
}
EXPORT_SYMBOL_GPL(__init_new_context);
/*
* Set up the context for a new address space.
*/
/* Give a new address space its own MMU context ID. Always succeeds. */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	mm->context.id = __init_new_context();

	return 0;
}
/*
* Free a context ID. Make sure to call this with preempt disabled!
*/
void __destroy_context(unsigned long ctx)
{
	/* Mark the context number as free again in the allocation bitmap */
	clear_bit(ctx, context_map);
}
EXPORT_SYMBOL_GPL(__destroy_context);
/*
* We're finished using the context for an address space.
*/
/* Release the MMU context of an address space that is going away. */
void destroy_context(struct mm_struct *mm)
{
	unsigned long id;

	/* __destroy_context() requires preemption to be disabled */
	preempt_disable();
	id = mm->context.id;
	if (id != NO_CONTEXT) {
		__destroy_context(id);
		mm->context.id = NO_CONTEXT;
	}
	preempt_enable();
}
/*
* Initialize the context management stuff.
*/
/* One-time setup of the context allocator. */
void __init mmu_context_init(void)
{
	/* Hand out IDs starting at FIRST_CONTEXT ... */
	next_mmu_context = FIRST_CONTEXT;
	/* ... and permanently reserve context 0 for kernel use */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
}
| gpl-2.0 |
ZeroInfinityXDA/OQC-m9 | arch/arm/mach-ixp4xx/ixp4xx_qmgr.c | 4764 | 9168 | /*
* Intel IXP4xx Queue Manager driver for Linux
*
* Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <mach/qmgr.h>
static struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];
#if DEBUG_QMGR
char qmgr_queue_descs[QUEUES][32];
#endif
/*
 * Select the IRQ trigger source for a queue and register its callback.
 * For queues 0-31 the source is programmable (4-bit field per queue in
 * the irqsrc registers); for queues 32-63 the hardware source is fixed
 * to "not nearly empty".
 */
void qmgr_set_irq(unsigned int queue, int src,
		  void (*handler)(void *pdev), void *pdev)
{
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	if (queue < HALF_QUEUES) {
		u32 __iomem *reg;
		int bit;
		BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
		reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
		bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
		/* Read-modify-write of this queue's source field only */
		__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
			     reg);
	} else
		/* IRQ source for queues 32-63 is fixed */
		BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);

	irq_handlers[queue] = handler;
	irq_pdevs[queue] = pdev;
	spin_unlock_irqrestore(&qmgr_lock, flags);
}
/*
 * IRQ handler for the low 32 queues on IXP42x stepping A0. On this
 * stepping the irqstat register cannot be trusted, so pending work is
 * reconstructed from the enable, source and status registers instead.
 *
 * Fix: the original dereferenced the __iomem register struct directly
 * (e.g. "qmgr_regs->irqen[0]"); all MMIO reads now go through
 * __raw_readl() like every other access in this driver, keeping the
 * __iomem annotation (and sparse checking) honest.
 */
static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 en_bitmap, src, stat;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);

	en_bitmap = __raw_readl(&qmgr_regs->irqen[0]);
	while (en_bitmap) {
		i = __fls(en_bitmap); /* number of the last "low" queue */
		en_bitmap &= ~BIT(i);
		src = __raw_readl(&qmgr_regs->irqsrc[i >> 3]);
		stat = __raw_readl(&qmgr_regs->stat1[i >> 3]);
		/* NOTE(review): src/stat hold packed per-queue fields but are
		 * not shifted by the queue's position within the register --
		 * confirm this decode against the IXP42x datasheet */
		if (src & 4) /* the IRQ condition is inverted */
			stat = ~stat;
		if (stat & BIT(src & 3)) {
			irq_handlers[i](irq_pdevs[i]);
			ret = IRQ_HANDLED;
		}
	}
	return ret;
}
/*
 * IRQ handler for the high 32 queues on IXP42x stepping A0. Pending work
 * is derived from enabled-and-not-nearly-empty rather than the unreliable
 * irqstat register.
 *
 * Fix: MMIO reads of irqen[1] and statne_h now use __raw_readl() instead
 * of directly dereferencing the __iomem register struct, consistent with
 * the rest of the driver.
 */
static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
{
	int i, ret = 0;
	u32 req_bitmap;

	/* ACK - it may clear any bits so don't rely on it */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);

	req_bitmap = __raw_readl(&qmgr_regs->irqen[1]) &
		     __raw_readl(&qmgr_regs->statne_h);
	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last "high" queue */
		req_bitmap &= ~BIT(i);
		irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
		ret = IRQ_HANDLED;
	}
	return ret;
}
/*
 * Shared IRQ handler for non-A0 steppings: irqstat is a plain per-queue
 * pending bitmap, one 32-bit register per half of the queue space.
 */
static irqreturn_t qmgr_irq(int irq, void *pdev)
{
	int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
	u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);

	if (!req_bitmap)
		return 0;	/* not our interrupt */
	__raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */

	while (req_bitmap) {
		i = __fls(req_bitmap); /* number of the last queue */
		req_bitmap &= ~BIT(i);
		i += half * HALF_QUEUES; /* convert to absolute queue number */
		irq_handlers[i](irq_pdevs[i]);
	}
	return IRQ_HANDLED;
}
void qmgr_enable_irq(unsigned int queue)
{
unsigned long flags;
int half = queue / 32;
u32 mask = 1 << (queue & (HALF_QUEUES - 1));
spin_lock_irqsave(&qmgr_lock, flags);
__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
&qmgr_regs->irqen[half]);
spin_unlock_irqrestore(&qmgr_lock, flags);
}
/* Disable interrupt delivery for a single queue and drop any latched status. */
void qmgr_disable_irq(unsigned int queue)
{
	const int half = queue / 32;
	const u32 bit = 1 << (queue & (HALF_QUEUES - 1));
	unsigned long flags;

	spin_lock_irqsave(&qmgr_lock, flags);
	/* First mask the source ... */
	__raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~bit,
		     &qmgr_regs->irqen[half]);
	/* ... then clear any status bit that was already pending */
	__raw_writel(bit, &qmgr_regs->irqstat[half]);
	spin_unlock_irqrestore(&qmgr_lock, flags);
}
/* Shift the 128-bit SRAM allocation mask left by one 16-dword page. */
static inline void shift_mask(u32 *mask)
{
	int i;

	/* Propagate the carry bit word by word, top word first */
	for (i = 3; i > 0; i--)
		mask[i] = (mask[i] << 1) | (mask[i - 1] >> 31);
	mask[0] <<= 1;
}
/*
 * Allocate a hardware queue of the given length (in dwords) and program
 * its configuration word (size code, SRAM page address, watermarks).
 * Returns 0 on success, -EINVAL for bad parameters, -EBUSY if the queue
 * is already configured, -ENOMEM if no contiguous SRAM gap is free.
 * With DEBUG_QMGR a printable description is also recorded.
 */
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char* name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	/* Watermarks are 3-bit encoded values */
	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	/* Queue length encodes the cfg size field and the number of
	 * 16-dword SRAM pages the queue occupies (initial mask) */
	switch (len) {
	case  16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case  32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case  64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16;		/* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		/* Non-zero cfg word means the queue is already in use */
		err = -EBUSY;
		goto err;
	}

	/* Slide the page-aligned window through the SRAM allocation
	 * bitmap until a free gap of the right size is found */
	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	/* Claim the pages and activate the queue */
	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}
/*
 * Free a previously requested queue: drain any stale entries, clear its
 * configuration word and return its SRAM pages to the allocation bitmap.
 */
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF;	/* SRAM page address of the queue */
	BUG_ON(!addr);		/* not requested */

	/* Recover the page count from the cfg size field */
	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	/* Position the mask at the queue's SRAM address */
	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif

	/* Drain and report any entries the owner left behind */
	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);

	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);
}
/*
 * Module init: claim the register window, reset all queue-manager state,
 * and install the two queue-manager interrupt handlers (A0-stepping
 * workaround variants where required).
 */
static int qmgr_init(void)
{
	int i, err;
	irq_handler_t handler1, handler2;

	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);

	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	/* A0 stepping needs the special handlers (irqstat is unreliable) */
	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM1, err);
		goto error_irq;
	}

	err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM2, err);
		goto error_irq2;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

error_irq2:
	free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}
/*
 * Module unload: release both QM interrupts, wait until any handler still
 * running on another CPU has finished, then give back the register window.
 */
static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	free_irq(IRQ_IXP4XX_QM2, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	synchronize_irq(IRQ_IXP4XX_QM2);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}
module_init(qmgr_init);
module_exit(qmgr_remove);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);
| gpl-2.0 |
keiranFTW/semc-kernel-msm7x30-ics | drivers/isdn/hisax/asuscom.c | 5020 | 11573 | /* $Id: asuscom.c,v 1.14.2.4 2004/01/13 23:48:39 keil Exp $
*
* low level stuff for ASUSCOM NETWORK INC. ISDNLink cards
*
* Author Karsten Keil
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Thanks to ASUSCOM NETWORK INC. Taiwan and Dynalink NL for information
*
*/
#include <linux/init.h>
#include <linux/isapnp.h>
#include "hisax.h"
#include "isac.h"
#include "ipac.h"
#include "hscx.h"
#include "isdnl1.h"
static const char *Asuscom_revision = "$Revision: 1.14.2.4 $";
#define byteout(addr,val) outb(val,addr)
#define bytein(addr) inb(addr)
#define ASUS_ISAC 0
#define ASUS_HSCX 1
#define ASUS_ADR 2
#define ASUS_CTRL_U7 3
#define ASUS_CTRL_POTS 5
#define ASUS_IPAC_ALE 0
#define ASUS_IPAC_DATA 1
#define ASUS_ISACHSCX 1
#define ASUS_IPAC 2
/* CARD_ADR (Write) */
#define ASUS_RESET 0x80 /* Bit 7 Reset-Leitung */
/* Latch the register offset on the ALE port, then read the data port. */
static inline u_char
readreg(unsigned int ale, unsigned int adr, u_char off)
{
	byteout(ale, off);
	return bytein(adr);
}
/* Latch the FIFO offset on the ALE port, then burst-read 'size' bytes. */
static inline void
readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	byteout(ale, off);
	insb(adr, data, size);
}
/* Latch the register offset on the ALE port, then write the data port. */
static inline void
writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
{
	byteout(ale, off);
	byteout(adr, data);
}
/* Latch the FIFO offset on the ALE port, then burst-write 'size' bytes. */
static inline void
writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	byteout(ale, off);
	outsb(adr, data, size);
}
/* Interface functions */
/* Read an ISAC register via the card's indirect ALE/data port pair. */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return readreg(cs->hw.asus.adr, cs->hw.asus.isac, offset);
}
/* Write an ISAC register via the card's indirect ALE/data port pair. */
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writereg(cs->hw.asus.adr, cs->hw.asus.isac, offset, value);
}
/* Burst-read 'size' bytes from the ISAC receive FIFO (offset 0). */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	readfifo(cs->hw.asus.adr, cs->hw.asus.isac, 0, data, size);
}
/* Burst-write 'size' bytes to the ISAC transmit FIFO (offset 0). */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	writefifo(cs->hw.asus.adr, cs->hw.asus.isac, 0, data, size);
}
/* IPAC variant: ISAC registers sit at offset 0x80 in the combined map. */
static u_char
ReadISAC_IPAC(struct IsdnCardState *cs, u_char offset)
{
	return readreg(cs->hw.asus.adr, cs->hw.asus.isac, offset | 0x80);
}
/* IPAC variant: ISAC registers sit at offset 0x80 in the combined map. */
static void
WriteISAC_IPAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writereg(cs->hw.asus.adr, cs->hw.asus.isac, offset|0x80, value);
}
/* IPAC variant: read the ISAC FIFO at offset 0x80. */
static void
ReadISACfifo_IPAC(struct IsdnCardState *cs, u_char * data, int size)
{
	readfifo(cs->hw.asus.adr, cs->hw.asus.isac, 0x80, data, size);
}
/* IPAC variant: write the ISAC FIFO at offset 0x80. */
static void
WriteISACfifo_IPAC(struct IsdnCardState *cs, u_char * data, int size)
{
	writefifo(cs->hw.asus.adr, cs->hw.asus.isac, 0x80, data, size);
}
/* Read a register of HSCX channel 0 or 1 (channel 1 is offset by 0x40). */
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return readreg(cs->hw.asus.adr, cs->hw.asus.hscx,
		       offset + (hscx ? 0x40 : 0));
}
/* Write a register of HSCX channel 0 or 1 (channel 1 is offset by 0x40). */
static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writereg(cs->hw.asus.adr,
		 cs->hw.asus.hscx, offset + (hscx ? 0x40 : 0), value);
}
/*
* fast interrupt HSCX stuff goes here
*/
#define READHSCX(cs, nr, reg) readreg(cs->hw.asus.adr, \
cs->hw.asus.hscx, reg + (nr ? 0x40 : 0))
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.asus.adr, \
cs->hw.asus.hscx, reg + (nr ? 0x40 : 0), data)
#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.asus.adr, \
cs->hw.asus.hscx, (nr ? 0x40 : 0), ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.asus.adr, \
cs->hw.asus.hscx, (nr ? 0x40 : 0), ptr, cnt)
#include "hscx_irq.c"
/*
 * IRQ handler for the ISAC+HSCX board variant. Services HSCX channel B
 * and ISAC interrupts in a loop until neither chip reports pending work,
 * then masks and unmasks all interrupt sources to re-arm the line.
 */
static irqreturn_t
asuscom_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char val;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	/* +0x40 selects the HSCX channel B register bank */
	val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40);
      Start_HSCX:
	if (val)
		hscx_int_main(cs, val);
	val = readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA);
      Start_ISAC:
	if (val)
		isac_interrupt(cs, val);
	/* Re-check both chips; new events may have arrived meanwhile */
	val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40);
	if (val) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX IntStat after IntRoutine");
		goto Start_HSCX;
	}
	val = readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA);
	if (val) {
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ISAC IntStat after IntRoutine");
		goto Start_ISAC;
	}
	/* Mask everything, then unmask again to re-arm the interrupt line */
	writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK, 0xFF);
	writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK + 0x40, 0xFF);
	writereg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_MASK, 0xFF);
	writereg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_MASK, 0x0);
	writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK, 0x0);
	writereg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_MASK + 0x40, 0x0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/*
 * IRQ handler for the IPAC board variant. The IPAC combines ISAC and two
 * HSCX channels behind a single interrupt status register; 'icnt' limits
 * the service loop to 5 passes to guard against a stuck interrupt.
 */
static irqreturn_t
asuscom_interrupt_ipac(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char ista, val, icnt = 5;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	ista = readreg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ISTA);
      Start_IPAC:
	if (cs->debug & L1_DEB_IPAC)
		debugl1(cs, "IPAC ISTA %02X", ista);
	if (ista & 0x0f) {
		/* Translate the IPAC summary bits into an HSCX ISTA value */
		val = readreg(cs->hw.asus.adr, cs->hw.asus.hscx, HSCX_ISTA + 0x40);
		if (ista & 0x01)
			val |= 0x01;
		if (ista & 0x04)
			val |= 0x02;
		if (ista & 0x08)
			val |= 0x04;
		if (val)
			hscx_int_main(cs, val);
	}
	if (ista & 0x20) {
		/* ISAC interrupt; mask out bit 0 which is handled below */
		val = 0xfe & readreg(cs->hw.asus.adr, cs->hw.asus.isac, ISAC_ISTA | 0x80);
		if (val) {
			isac_interrupt(cs, val);
		}
	}
	if (ista & 0x10) {
		val = 0x01;
		isac_interrupt(cs, val);
	}
	ista  = readreg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ISTA);
	if ((ista & 0x3f) && icnt) {
		icnt--;
		goto Start_IPAC;
	}
	if (!icnt)
		printk(KERN_WARNING "ASUS IRQ LOOP\n");
	/* Pulse the IPAC mask register to re-arm the interrupt line */
	writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xFF);
	writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xC0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/* Release the 8-byte I/O window claimed in setup_asuscom(), if any. */
static void
release_io_asuscom(struct IsdnCardState *cs)
{
	const int bytecnt = 8;

	if (cs->hw.asus.cfg_reg)
		release_region(cs->hw.asus.cfg_reg, bytecnt);
}
/*
 * Hardware-reset the card: pulse the reset line (IPAC: POTA2 bit 0x20,
 * ISAC+HSCX: the ASUS_RESET bit on the address port), then reprogram
 * the IPAC's basic configuration registers.
 */
static void
reset_asuscom(struct IsdnCardState *cs)
{
	if (cs->subtyp == ASUS_IPAC)
		writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_POTA2, 0x20);
	else
		byteout(cs->hw.asus.adr, ASUS_RESET);	/* Reset On */
	mdelay(10);
	if (cs->subtyp == ASUS_IPAC)
		writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_POTA2, 0x0);
	else
		byteout(cs->hw.asus.adr, 0);	/* Reset Off */
	mdelay(10);
	if (cs->subtyp == ASUS_IPAC) {
		/* Post-reset IPAC base configuration */
		writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_CONF, 0x0);
		writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_ACFG, 0xff);
		writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_AOE, 0x0);
		writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_MASK, 0xc0);
		writereg(cs->hw.asus.adr, cs->hw.asus.isac, IPAC_PCFG, 0x12);
	}
}
/*
 * Card control callback: dispatch HiSax card messages (reset, release,
 * init, test). Always returns 0.
 */
static int
Asus_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
	case CARD_RESET:
		spin_lock_irqsave(&cs->lock, flags);
		reset_asuscom(cs);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case CARD_RELEASE:
		release_io_asuscom(cs);
		break;
	case CARD_INIT:
		spin_lock_irqsave(&cs->lock, flags);
		cs->debug |= L1_DEB_IPAC;
		inithscxisac(cs, 3);
		spin_unlock_irqrestore(&cs->lock, flags);
		break;
	case CARD_TEST:
		break;
	}
	return (0);
}
#ifdef __ISAPNP__
static struct isapnp_device_id asus_ids[] __devinitdata = {
{ ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688),
ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1688),
(unsigned long) "Asus1688 PnP" },
{ ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1690),
ISAPNP_VENDOR('A', 'S', 'U'), ISAPNP_FUNCTION(0x1690),
(unsigned long) "Asus1690 PnP" },
{ ISAPNP_VENDOR('S', 'I', 'E'), ISAPNP_FUNCTION(0x0020),
ISAPNP_VENDOR('S', 'I', 'E'), ISAPNP_FUNCTION(0x0020),
(unsigned long) "Isurf2 PnP" },
{ ISAPNP_VENDOR('E', 'L', 'F'), ISAPNP_FUNCTION(0x0000),
ISAPNP_VENDOR('E', 'L', 'F'), ISAPNP_FUNCTION(0x0000),
(unsigned long) "Iscas TE320" },
{ 0, }
};
static struct isapnp_device_id *ipid __devinitdata = &asus_ids[0];
static struct pnp_card *pnp_c __devinitdata = NULL;
#endif
/*
 * Probe and initialize an Asuscom ISDNLink card. Optionally autodetects
 * the I/O base and IRQ via ISAPnP, claims the 8-byte I/O window, then
 * distinguishes the IPAC single-chip variant from the older ISAC+HSCX
 * pair (via the IPAC_ID register) and installs the matching accessors
 * and interrupt handler. Returns 1 on success, 0 on any failure.
 */
int __devinit
setup_asuscom(struct IsdnCard *card)
{
	int bytecnt;
	struct IsdnCardState *cs = card->cs;
	u_char val;
	char tmp[64];

	strcpy(tmp, Asuscom_revision);
	printk(KERN_INFO "HiSax: Asuscom ISDNLink driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_ASUSCOM)
		return (0);
#ifdef __ISAPNP__
	/* No I/O base configured: try ISAPnP autodetection */
	if (!card->para[1] && isapnp_present()) {
		struct pnp_dev *pnp_d;
		while(ipid->card_vendor) {
			if ((pnp_c = pnp_find_card(ipid->card_vendor,
				ipid->card_device, pnp_c))) {
				pnp_d = NULL;
				if ((pnp_d = pnp_find_dev(pnp_c,
					ipid->vendor, ipid->function, pnp_d))) {
					int err;

					printk(KERN_INFO "HiSax: %s detected\n",
						(char *)ipid->driver_data);
					pnp_disable_dev(pnp_d);
					err = pnp_activate_dev(pnp_d);
					if (err<0) {
						printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
							__func__, err);
						return(0);
					}
					/* Take I/O base and IRQ from the PnP resources */
					card->para[1] = pnp_port_start(pnp_d, 0);
					card->para[0] = pnp_irq(pnp_d, 0);
					if (!card->para[0] || !card->para[1]) {
						printk(KERN_ERR "AsusPnP:some resources are missing %ld/%lx\n",
							card->para[0], card->para[1]);
						pnp_disable_dev(pnp_d);
						return(0);
					}
					break;
				} else {
					printk(KERN_ERR "AsusPnP: PnP error card found, no device\n");
				}
			}
			ipid++;
			pnp_c = NULL;
		}
		if (!ipid->card_vendor) {
			printk(KERN_INFO "AsusPnP: no ISAPnP card found\n");
			return(0);
		}
	}
#endif
	bytecnt = 8;
	cs->hw.asus.cfg_reg = card->para[1];
	cs->irq = card->para[0];
	if (!request_region(cs->hw.asus.cfg_reg, bytecnt, "asuscom isdn")) {
		printk(KERN_WARNING
		       "HiSax: ISDNLink config port %x-%x already in use\n",
		       cs->hw.asus.cfg_reg,
		       cs->hw.asus.cfg_reg + bytecnt);
		return (0);
	}
	printk(KERN_INFO "ISDNLink: defined at 0x%x IRQ %d\n",
		cs->hw.asus.cfg_reg, cs->irq);
	setup_isac(cs);
	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &Asus_card_msg;
	/* An IPAC chip answers the ID register read with version 1 or 2 */
	val = readreg(cs->hw.asus.cfg_reg + ASUS_IPAC_ALE,
		cs->hw.asus.cfg_reg + ASUS_IPAC_DATA, IPAC_ID);
	if ((val == 1) || (val == 2)) {
		cs->subtyp = ASUS_IPAC;
		cs->hw.asus.adr  = cs->hw.asus.cfg_reg + ASUS_IPAC_ALE;
		cs->hw.asus.isac = cs->hw.asus.cfg_reg + ASUS_IPAC_DATA;
		cs->hw.asus.hscx = cs->hw.asus.cfg_reg + ASUS_IPAC_DATA;
		test_and_set_bit(HW_IPAC, &cs->HW_Flags);
		cs->readisac = &ReadISAC_IPAC;
		cs->writeisac = &WriteISAC_IPAC;
		cs->readisacfifo = &ReadISACfifo_IPAC;
		cs->writeisacfifo = &WriteISACfifo_IPAC;
		cs->irq_func = &asuscom_interrupt_ipac;
		printk(KERN_INFO "Asus: IPAC version %x\n", val);
	} else {
		/* Old two-chip layout: separate ISAC and HSCX data ports */
		cs->subtyp = ASUS_ISACHSCX;
		cs->hw.asus.adr = cs->hw.asus.cfg_reg + ASUS_ADR;
		cs->hw.asus.isac = cs->hw.asus.cfg_reg + ASUS_ISAC;
		cs->hw.asus.hscx = cs->hw.asus.cfg_reg + ASUS_HSCX;
		cs->hw.asus.u7 = cs->hw.asus.cfg_reg + ASUS_CTRL_U7;
		cs->hw.asus.pots = cs->hw.asus.cfg_reg + ASUS_CTRL_POTS;
		cs->readisac = &ReadISAC;
		cs->writeisac = &WriteISAC;
		cs->readisacfifo = &ReadISACfifo;
		cs->writeisacfifo = &WriteISACfifo;
		cs->irq_func = &asuscom_interrupt;
		ISACVersion(cs, "ISDNLink:");
		if (HscxVersion(cs, "ISDNLink:")) {
			printk(KERN_WARNING
		     	"ISDNLink: wrong HSCX versions check IO address\n");
			release_io_asuscom(cs);
			return (0);
		}
	}
	return (1);
}
| gpl-2.0 |
forumber/android_kernel_zte_bladevec4g | net/sched/sch_mqprio.c | 5020 | 10502 | /*
* net/sched/sch_mqprio.c
*
* Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
/* Private state of the mqprio root qdisc. */
struct mqprio_sched {
	struct Qdisc **qdiscs;	/* one default child qdisc per tx queue */
	int hw_owned;		/* 1 if the NIC (ndo_setup_tc) owns the tc map */
};
/* Tear down the root qdisc: destroy any pre-allocated child qdiscs that
 * were not yet attached, then undo the device's traffic-class setup - in
 * hardware via ndo_setup_tc(dev, 0) if the NIC owned it, else in software.
 * Also used as the error-unwind path from mqprio_init().
 */
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		/* The array may be only partially filled if init failed;
		 * stop at the first NULL entry. */
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, 0);
	else
		netdev_set_num_tc(dev, 0);
}
/* Validate a tc_mqprio_qopt supplied from userspace against @dev.
 * Returns 0 if the configuration is usable, -EINVAL otherwise.
 */
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* net_device does not support requested operation */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
		return -EINVAL;

	/* if hw owned qcount and qoffset are taken from LLD so
	 * no reason to verify them here
	 */
	if (qopt->hw)
		return 0;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range being equal to the
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offset and counts do not overlap */
		/* NOTE(review): this only detects overlaps if the offsets
		 * are supplied in ascending order - TODO confirm callers
		 * guarantee that.
		 */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}
/* Set up the mqprio root qdisc: validate the netlink options, pre-allocate
 * one default (pfifo_fast) child qdisc per tx queue, then program the
 * device's traffic-class-to-queue mapping either in hardware
 * (ndo_setup_tc) or in software. On any failure everything allocated so
 * far is released via mqprio_destroy().
 */
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	/* mqprio may only be the root qdisc of a multiqueue device. */
	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* pre-allocate qdisc, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (qdisc == NULL) {
			err = -ENOMEM;
			goto err;
		}
		priv->qdiscs[i] = qdisc;
	}

	/* If the mqprio options indicate that hardware should own
	 * the queue mapping then run ndo_setup_tc otherwise use the
	 * supplied and verified mapping
	 */
	if (qopt->hw) {
		priv->hw_owned = 1;
		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
		if (err)
			goto err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	/* mqprio_destroy() copes with partially initialized state. */
	mqprio_destroy(sch);
	return err;
}
/* Graft the pre-allocated child qdiscs onto their tx queues. Any qdisc
 * previously attached to a queue is returned by dev_graft_qdisc() and
 * destroyed here. Ownership of the children moves to the queues, so the
 * priv->qdiscs shadow array can be released.
 */
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (qdisc)
			qdisc_destroy(qdisc);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
/* Map a class handle to its tx queue. Queue classids follow the per-tc
 * classids, so subtract both the 1-based offset and the number of traffic
 * classes; out-of-range handles yield NULL.
 */
static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long qnum = cl - 1 - netdev_get_num_tc(dev);

	return (qnum < dev->num_tx_queues) ?
		netdev_get_tx_queue(dev, qnum) : NULL;
}
/* Replace the qdisc of one tx queue (class @cl) with @new, returning the
 * previous one in @old. The device is deactivated around the swap so the
 * queue is not being run while its qdisc changes.
 */
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
/* Dump the qdisc configuration and aggregate statistics. Sums the
 * qlen/bstats/qstats of every tx queue's qdisc (each under its own qdisc
 * lock) into the root's counters, then emits the tc_mqprio_qopt currently
 * programmed on the device. NLA_PUT jumps to nla_put_failure when the skb
 * runs out of tailroom.
 */
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen		+= qdisc->q.qlen;
		sch->bstats.bytes	+= qdisc->bstats.bytes;
		sch->bstats.packets	+= qdisc->bstats.packets;
		sch->qstats.qlen	+= qdisc->qstats.qlen;
		sch->qstats.backlog	+= qdisc->qstats.backlog;
		sch->qstats.drops	+= qdisc->qstats.drops;
		sch->qstats.requeues	+= qdisc->qstats.requeues;
		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	/* Report the mapping actually in effect on the device, not the
	 * options originally passed in. */
	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_owned;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
/* Return the leaf qdisc attached to class @cl, or NULL if @cl does not
 * name a tx queue.
 */
static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	return dev_queue ? dev_queue->qdisc_sleeping : NULL;
}
/* Look up a class by classid. Valid handles are 1..num_tc for the
 * virtual traffic classes followed by one per tx queue; anything larger
 * is unknown and yields 0.
 */
static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx <= dev->num_tx_queues + netdev_get_num_tc(dev))
		return ntx;
	return 0;
}
/* Classes are not reference counted; nothing to release. */
static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}
/* Fill in parent/info for one class. Classes 1..num_tc represent the
 * traffic classes and are parented at the root; higher class ids are tx
 * queues, parented under whichever tc's queue range contains them.
 */
static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		/* Find the tc whose [offset, offset+count) range holds this
		 * queue; q_idx here is 1-based, hence the > / <= bounds. */
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
/* Dump statistics for one class. For a traffic class (cl <= num_tc) the
 * stats are the sum over all qdiscs in that tc's queue range; for a
 * queue class they come straight from that queue's sleeping qdisc.
 */
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop lock here it will be reclaimed before touching
		 * statistics this is required because the d->lock we
		 * hold here is the look on dev_queue->qdisc_sleeping
		 * also acquired below.
		 */
		spin_unlock_bh(d->lock);
		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
			spin_lock_bh(qdisc_lock(qdisc));
			bstats.bytes      += qdisc->bstats.bytes;
			bstats.packets    += qdisc->bstats.packets;
			qstats.qlen       += qdisc->qstats.qlen;
			qstats.backlog    += qdisc->qstats.backlog;
			qstats.drops      += qdisc->qstats.drops;
			qstats.requeues   += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim root sleeping lock before completing stats */
		spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, &qstats) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		sch->qstats.qlen = sch->q.qlen;
		if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, &sch->qstats) < 0)
			return -1;
	}
	return 0;
}
/* Iterate over all classes: first one virtual class per traffic class,
 * then one class per tx queue. Class ids passed to arg->fn are 1-based.
 */
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
/* Class operations for the per-tc / per-queue class hierarchy. */
static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.get		= mqprio_get,
	.put		= mqprio_put,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

/* Root qdisc operations registered with the packet scheduler core. */
static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};
/* Register the "mqprio" qdisc with the scheduler core on module load. */
static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

/* Unregister the qdisc on module unload. */
static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}
module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");
| gpl-2.0 |
wanam/Adam-Kernel-GS5-LTE | drivers/xen/gntalloc.c | 5020 | 15691 | /******************************************************************************
* gntalloc.c
*
* Device for creating grant references (in user-space) that may be shared
* with other domains.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* This driver exists to allow userspace programs in Linux to allocate kernel
* memory that will later be shared with another domain. Without this device,
* Linux userspace programs cannot create grant references.
*
* How this stuff works:
* X -> granting a page to Y
* Y -> mapping the grant from X
*
* 1. X uses the gntalloc device to allocate a page of kernel memory, P.
* 2. X creates an entry in the grant table that says domid(Y) can access P.
* This is done without a hypercall unless the grant table needs expansion.
* 3. X gives the grant reference identifier, GREF, to Y.
* 4. Y maps the page, either directly into kernel memory for use in a backend
* driver, or via a the gntdev device to map into the address space of an
* application running in Y. This is the first point at which Xen does any
* tracking of the page.
* 5. A program in X mmap()s a segment of the gntalloc device that corresponds
* to the shared page, and can now communicate with Y over the shared page.
*
*
* NOTE TO USERSPACE LIBRARIES:
* The grant allocation and mmap()ing are, naturally, two separate operations.
* You set up the sharing by calling the create ioctl() and then the mmap().
* Teardown requires munmap() and either close() or ioctl().
*
* WARNING: Since Xen does not allow a guest to forcibly end the use of a grant
* reference, this device can be used to consume kernel memory by leaving grant
* references mapped by another domain when an application exits. Therefore,
* there is a global limit on the number of pages that can be allocated. When
* all references to the page are unmapped, it will be freed during the next
* grant operation.
*/
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <xen/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/gntalloc.h>
#include <xen/events.h>
/* Global cap on outstanding grant pages (see file-header warning about
 * pages pinned by remote mappings); tunable at runtime via sysfs. */
static int limit = 1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
		"the gntalloc device");

static LIST_HEAD(gref_list);	/* all live grefs, protected by gref_mutex */
static DEFINE_MUTEX(gref_mutex);
static int gref_size;		/* current number of grefs, vs. limit */
struct notify_info {
	uint16_t pgoff:12;    /* Bits 0-11: Offset of the byte to clear */
	uint16_t flags:2;     /* Bits 12-13: Unmap notification flags */
	int event;            /* Port (event channel) to notify */
};

/* Metadata on a grant reference. */
struct gntalloc_gref {
	struct list_head next_gref;  /* list entry gref_list */
	struct list_head next_file;  /* list entry file->list, if open */
	struct page *page;	     /* The shared page */
	uint64_t file_index;         /* File offset for mmap() */
	unsigned int users;          /* Use count - when zero, waiting on Xen */
	grant_ref_t gref_id;         /* The grant reference number */
	struct notify_info notify;   /* Unmap notification */
};

/* Per-open-file state: the grefs this fd owns and the next mmap index. */
struct gntalloc_file_private_data {
	struct list_head list;
	uint64_t index;
};

/* Per-VMA state: the first gref of the mapping, its share count, and the
 * number of pages mapped. */
struct gntalloc_vma_private_data {
	struct gntalloc_gref *gref;
	int users;
	int count;
};

static void __del_gref(struct gntalloc_gref *gref);
static void do_cleanup(void)
{
struct gntalloc_gref *gref, *n;
list_for_each_entry_safe(gref, n, &gref_list, next_gref) {
if (!gref->users)
__del_gref(gref);
}
}
/* Allocate op->count grants of shared memory for domain op->domid.
 * Each grant gets a zeroed page plus a grant-table entry; the resulting
 * grant ids are stored into @gref_ids. On success the new grefs are
 * spliced onto the global list and the file's list under gref_mutex.
 * On failure everything allocated so far is unwound and a negative errno
 * is returned; the caller's gref_size reservation is also rolled back.
 */
static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
	uint32_t *gref_ids, struct gntalloc_file_private_data *priv)
{
	int i, rc, readonly;
	LIST_HEAD(queue_gref);
	LIST_HEAD(queue_file);
	struct gntalloc_gref *gref;

	readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
	rc = -ENOMEM;
	for (i = 0; i < op->count; i++) {
		gref = kzalloc(sizeof(*gref), GFP_KERNEL);
		if (!gref)
			goto undo;
		list_add_tail(&gref->next_gref, &queue_gref);
		list_add_tail(&gref->next_file, &queue_file);
		gref->users = 1;
		gref->file_index = op->index + i * PAGE_SIZE;
		gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!gref->page)
			goto undo;

		/* Grant foreign access to the page. */
		gref->gref_id = gnttab_grant_foreign_access(op->domid,
			pfn_to_mfn(page_to_pfn(gref->page)), readonly);
		if ((int)gref->gref_id < 0) {
			rc = gref->gref_id;
			goto undo;
		}
		gref_ids[i] = gref->gref_id;
	}

	/* Add to gref lists. */
	mutex_lock(&gref_mutex);
	list_splice_tail(&queue_gref, &gref_list);
	list_splice_tail(&queue_file, &priv->list);
	mutex_unlock(&gref_mutex);

	return 0;

undo:
	mutex_lock(&gref_mutex);
	/* Give back the portion of the reservation we will not use. */
	gref_size -= (op->count - i);

	list_for_each_entry(gref, &queue_file, next_file) {
		/* __del_gref does not remove from queue_file */
		__del_gref(gref);
	}

	/* It's possible for the target domain to map the just-allocated grant
	 * references by blindly guessing their IDs; if this is done, then
	 * __del_gref will leave them in the queue_gref list. They need to be
	 * added to the global list so that we can free them when they are no
	 * longer referenced.
	 */
	if (unlikely(!list_empty(&queue_gref)))
		list_splice_tail(&queue_gref, &gref_list);
	mutex_unlock(&gref_mutex);
	return rc;
}
/* Free one grant reference: deliver any unmap notification, then try to
 * revoke the grant-table entry. If the foreign domain still holds the
 * page mapped, the gref is left in place (early return) to be reaped by
 * a later do_cleanup() pass. Caller must hold gref_mutex.
 */
static void __del_gref(struct gntalloc_gref *gref)
{
	if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		uint8_t *tmp = kmap(gref->page);
		tmp[gref->notify.pgoff] = 0;
		kunmap(gref->page);
	}
	if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(gref->notify.event);
		evtchn_put(gref->notify.event);
	}

	gref->notify.flags = 0;

	if (gref->gref_id > 0) {
		/* Still mapped by the remote domain: keep the gref alive. */
		if (gnttab_query_foreign_access(gref->gref_id))
			return;

		if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
			return;

		gnttab_free_grant_reference(gref->gref_id);
	}

	gref_size--;
	list_del(&gref->next_gref);

	if (gref->page)
		__free_page(gref->page);

	kfree(gref);
}
/* finds contiguous grant references in a file, returns the first */
/* The file list is kept in allocation order, so a match is a run of
 * `count` consecutive entries whose file_index values advance by
 * PAGE_SIZE starting at `index`; any gap aborts the search.
 */
static struct gntalloc_gref *find_grefs(struct gntalloc_file_private_data *priv,
		uint64_t index, uint32_t count)
{
	struct gntalloc_gref *rv = NULL, *gref;
	list_for_each_entry(gref, &priv->list, next_file) {
		if (gref->file_index == index && !rv)
			rv = gref;
		if (rv) {
			if (gref->file_index != index)
				return NULL;
			index += PAGE_SIZE;
			count--;
			if (count == 0)
				return rv;
		}
	}
	return NULL;
}
/*
* -------------------------------------
* File operations.
* -------------------------------------
*/
/* Allocate and initialize the per-open-file state. */
static int gntalloc_open(struct inode *inode, struct file *filp)
{
	struct gntalloc_file_private_data *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->list);
	filp->private_data = priv;

	pr_debug("%s: priv %p\n", __func__, priv);

	return 0;
}
/* Release the per-open-file state: drop this file's reference on every
 * grant still on its list; grants with no users left are freed (or kept
 * for a later do_cleanup() if the remote side still maps them).
 */
static int gntalloc_release(struct inode *inode, struct file *filp)
{
	struct gntalloc_file_private_data *priv = filp->private_data;
	struct gntalloc_gref *gref;

	pr_debug("%s: priv %p\n", __func__, priv);

	mutex_lock(&gref_mutex);
	while (!list_empty(&priv->list)) {
		gref = list_entry(priv->list.next,
			struct gntalloc_gref, next_file);
		list_del(&gref->next_file);
		gref->users--;
		if (gref->users == 0)
			__del_gref(gref);
	}
	kfree(priv);
	mutex_unlock(&gref_mutex);

	return 0;
}
/* IOCTL_GNTALLOC_ALLOC_GREF: allocate op.count shared pages and copy
 * their grant ids back to userspace. The global page limit is enforced
 * (after reaping unused grants) before the gref_size reservation is
 * taken; add_grefs() rolls that reservation back on failure.
 */
static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
		struct ioctl_gntalloc_alloc_gref __user *arg)
{
	int rc = 0;
	struct ioctl_gntalloc_alloc_gref op;
	uint32_t *gref_ids;

	pr_debug("%s: priv %p\n", __func__, priv);

	if (copy_from_user(&op, arg, sizeof(op))) {
		rc = -EFAULT;
		goto out;
	}

	gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
	if (!gref_ids) {
		rc = -ENOMEM;
		goto out;
	}

	mutex_lock(&gref_mutex);
	/* Clean up pages that were at zero (local) users but were still mapped
	 * by remote domains. Since those pages count towards the limit that we
	 * are about to enforce, removing them here is a good idea.
	 */
	do_cleanup();
	if (gref_size + op.count > limit) {
		mutex_unlock(&gref_mutex);
		rc = -ENOSPC;
		goto out_free;
	}
	gref_size += op.count;
	op.index = priv->index;
	priv->index += op.count * PAGE_SIZE;
	mutex_unlock(&gref_mutex);

	rc = add_grefs(&op, gref_ids, priv);
	if (rc < 0)
		goto out_free;

	/* Once we finish add_grefs, it is unsafe to touch the new reference,
	 * since it is possible for a concurrent ioctl to remove it (by guessing
	 * its index). If the userspace application doesn't provide valid memory
	 * to write the IDs to, then it will need to close the file in order to
	 * release - which it will do by segfaulting when it tries to access the
	 * IDs to close them.
	 */
	if (copy_to_user(arg, &op, sizeof(op))) {
		rc = -EFAULT;
		goto out_free;
	}
	if (copy_to_user(arg->gref_ids, gref_ids,
			sizeof(gref_ids[0]) * op.count)) {
		rc = -EFAULT;
		goto out_free;
	}

out_free:
	kfree(gref_ids);
out:
	return rc;
}
/* IOCTL_GNTALLOC_DEALLOC_GREF: release a contiguous run of op.count
 * grants starting at op.index. The grants are only unlinked from the
 * file's list here; actual freeing happens in do_cleanup() once no
 * users (local or remote) remain.
 */
static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
		void __user *arg)
{
	int i, rc = 0;
	struct ioctl_gntalloc_dealloc_gref op;
	struct gntalloc_gref *gref, *n;

	pr_debug("%s: priv %p\n", __func__, priv);

	if (copy_from_user(&op, arg, sizeof(op))) {
		rc = -EFAULT;
		goto dealloc_grant_out;
	}

	mutex_lock(&gref_mutex);
	gref = find_grefs(priv, op.index, op.count);
	if (gref) {
		/* Remove from the file list only, and decrease reference count.
		 * The later call to do_cleanup() will remove from gref_list and
		 * free the memory if the pages aren't mapped anywhere.
		 */
		for (i = 0; i < op.count; i++) {
			n = list_entry(gref->next_file.next,
				struct gntalloc_gref, next_file);
			list_del(&gref->next_file);
			gref->users--;
			gref = n;
		}
	} else {
		rc = -EINVAL;
	}

	do_cleanup();

	mutex_unlock(&gref_mutex);
dealloc_grant_out:
	return rc;
}
/* IOCTL_GNTALLOC_SET_UNMAP_NOTIFY: arrange for a byte-clear and/or event
 * channel notification when the grant holding op.index is freed. The
 * page-aligned part of op.index selects the gref; the low bits select
 * the byte to clear.
 */
static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
		void __user *arg)
{
	struct ioctl_gntalloc_unmap_notify op;
	struct gntalloc_gref *gref;
	uint64_t index;
	int pgoff;
	int rc;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	index = op.index & ~(PAGE_SIZE - 1);
	pgoff = op.index & (PAGE_SIZE - 1);

	mutex_lock(&gref_mutex);

	gref = find_grefs(priv, index, 1);
	if (!gref) {
		rc = -ENOENT;
		goto unlock_out;
	}

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port)) {
			rc = -EINVAL;
			goto unlock_out;
		}
	}

	if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(gref->notify.event);

	gref->notify.flags = op.action;
	gref->notify.pgoff = pgoff;
	gref->notify.event = op.event_channel_port;
	rc = 0;

unlock_out:
	mutex_unlock(&gref_mutex);
	return rc;
}
static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct gntalloc_file_private_data *priv = filp->private_data;
switch (cmd) {
case IOCTL_GNTALLOC_ALLOC_GREF:
return gntalloc_ioctl_alloc(priv, (void __user *)arg);
case IOCTL_GNTALLOC_DEALLOC_GREF:
return gntalloc_ioctl_dealloc(priv, (void __user *)arg);
case IOCTL_GNTALLOC_SET_UNMAP_NOTIFY:
return gntalloc_ioctl_unmap_notify(priv, (void __user *)arg);
default:
return -ENOIOCTLCMD;
}
return 0;
}
/* A VMA sharing this mapping was created (fork/split): bump its share
 * count under gref_mutex.
 */
static void gntalloc_vma_open(struct vm_area_struct *vma)
{
	struct gntalloc_vma_private_data *priv = vma->vm_private_data;

	if (priv) {
		mutex_lock(&gref_mutex);
		priv->users++;
		mutex_unlock(&gref_mutex);
	}
}
/* A VMA sharing this mapping went away. When the last sharer closes,
 * drop one user per mapped page and free grants with no users left.
 * NOTE(review): the walk advances along next_gref, while mmap located
 * the run via next_file - presumably allocation order keeps the two
 * lists parallel; confirm against add_grefs().
 */
static void gntalloc_vma_close(struct vm_area_struct *vma)
{
	struct gntalloc_vma_private_data *priv = vma->vm_private_data;
	struct gntalloc_gref *gref, *next;
	int i;

	if (!priv)
		return;

	mutex_lock(&gref_mutex);
	priv->users--;
	if (priv->users == 0) {
		gref = priv->gref;
		for (i = 0; i < priv->count; i++) {
			gref->users--;
			next = list_entry(gref->next_gref.next,
					  struct gntalloc_gref, next_gref);
			if (gref->users == 0)
				__del_gref(gref);
			gref = next;
		}
		kfree(priv);
	}
	mutex_unlock(&gref_mutex);
}
/* VMA callbacks used to keep the per-mapping share count accurate. */
static struct vm_operations_struct gntalloc_vmops = {
	.open = gntalloc_vma_open,
	.close = gntalloc_vma_close,
};
/* mmap() a run of previously allocated grants into userspace. vm_pgoff
 * selects the starting file index; the grants must form a contiguous run
 * on this file (see find_grefs). Each mapped page takes one extra user
 * reference, dropped again in gntalloc_vma_close(). Only MAP_SHARED
 * mappings are allowed, since the pages are shared with another domain.
 */
static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct gntalloc_file_private_data *priv = filp->private_data;
	struct gntalloc_vma_private_data *vm_priv;
	struct gntalloc_gref *gref;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	int rv, i;

	if (!(vma->vm_flags & VM_SHARED)) {
		printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
		return -EINVAL;
	}

	vm_priv = kmalloc(sizeof(*vm_priv), GFP_KERNEL);
	if (!vm_priv)
		return -ENOMEM;

	mutex_lock(&gref_mutex);

	pr_debug("%s: priv %p,%p, page %lu+%d\n", __func__,
		       priv, vm_priv, vma->vm_pgoff, count);

	gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
	if (gref == NULL) {
		rv = -ENOENT;
		pr_debug("%s: Could not find grant reference",
				__func__);
		kfree(vm_priv);
		goto out_unlock;
	}

	vm_priv->gref = gref;
	vm_priv->users = 1;
	vm_priv->count = count;

	vma->vm_private_data = vm_priv;

	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;

	vma->vm_ops = &gntalloc_vmops;

	for (i = 0; i < count; i++) {
		gref->users++;
		rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				gref->page);
		if (rv)
			goto out_unlock;

		gref = list_entry(gref->next_file.next,
				struct gntalloc_gref, next_file);
	}
	rv = 0;

out_unlock:
	mutex_unlock(&gref_mutex);
	return rv;
}
/* File operations exposed through the /dev/xen/gntalloc misc device. */
static const struct file_operations gntalloc_fops = {
	.owner = THIS_MODULE,
	.open = gntalloc_open,
	.release = gntalloc_release,
	.unlocked_ioctl = gntalloc_ioctl,
	.mmap = gntalloc_mmap
};
/*
* -------------------------------------
* Module creation/destruction.
* -------------------------------------
*/
/* Dynamic-minor misc device: shows up as /dev/xen/gntalloc. */
static struct miscdevice gntalloc_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "xen/gntalloc",
	.fops	= &gntalloc_fops,
};
/* Module init: register the misc device, but only when actually running
 * as a Xen guest.
 */
static int __init gntalloc_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&gntalloc_miscdev);
	if (err) {
		printk(KERN_ERR "Could not register misc gntalloc device\n");
		return err;
	}

	pr_debug("Created grant allocation device at %d,%d\n",
			MISC_MAJOR, gntalloc_miscdev.minor);

	return 0;
}
/* Module exit: unregister the misc device. */
static void __exit gntalloc_exit(void)
{
	misc_deregister(&gntalloc_miscdev);
}
module_init(gntalloc_init);
module_exit(gntalloc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Carter Weatherly <carter.weatherly@jhuapl.edu>, "
		"Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("User-space grant reference allocator driver");
| gpl-2.0 |
DooMLoRD/android_kernel_sony_msm8994 | arch/alpha/kernel/setup.c | 6812 | 39267 | /*
* linux/arch/alpha/kernel/setup.c
*
* Copyright (C) 1995 Linus Torvalds
*/
/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
/*
* Bootup setup stuff.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/eisa.h>
#include <linux/pfn.h>
#ifdef CONFIG_MAGIC_SYSRQ
#include <linux/sysrq.h>
#include <linux/reboot.h>
#endif
#include <linux/notifier.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <linux/log2.h>
#include <linux/export.h>
extern struct atomic_notifier_head panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
/* Panic notifier; runs with top priority so it fires before other
 * callbacks on the chain. The handler is defined later in this file. */
static struct notifier_block alpha_panic_block = {
	alpha_panic_event,
        NULL,
        INT_MAX /* try to do it first */
};
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
#include "proto.h"
#include "pci_impl.h"
struct hwrpb_struct *hwrpb;	/* HW restart parameter block from the console */
EXPORT_SYMBOL(hwrpb);
unsigned long srm_hae;

/* Cache geometry descriptors, filled in by determine_cpu_caches(). */
int alpha_l1i_cacheshape;
int alpha_l1d_cacheshape;
int alpha_l2_cacheshape;
int alpha_l3_cacheshape;

#ifdef CONFIG_VERBOSE_MCHECK
/* 0=minimum, 1=verbose, 2=all */
/* These can be overridden via the command line, ie "verbose_mcheck=2") */
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif

#ifdef CONFIG_NUMA
struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_to_cpumask_map);
#endif

/* Which processor we booted from. */
int boot_cpuid;
/*
* Using SRM callbacks for initial console output. This works from
* setup_arch() time through the end of time_init(), as those places
* are under our (Alpha) control.
* "srmcons" specified in the boot command arguments allows us to
* see kernel messages during the period of time before the true
* console device is "registered" during console_init().
* As of this version (2.5.59), console_init() will call
* disable_early_printk() as the last action before initializing
* the console drivers. That's the last possible time srmcons can be
* unregistered without interfering with console behavior.
*
* By default, OFF; set it with a bootcommand arg of "srmcons" or
* "console=srm". The meaning of these two args is:
* "srmcons" - early callback prints
* "console=srm" - full callback based console, including early prints
*/
int srmcons_output = 0;	/* see the SRM console comment above */

/* Enforce a memory size limit; useful for testing. By default, none. */
unsigned long mem_size_limit = 0;

/* Set AGP GART window size (0 means disabled). */
unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;

#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;
int alpha_using_srm;
EXPORT_SYMBOL(alpha_using_srm);
#endif

/* Forward declarations for the system-type probing helpers below. */
static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
					       unsigned long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
static void get_sysnames(unsigned long, unsigned long, unsigned long,
			 char **, char **);
static void determine_cpu_caches (unsigned int);

static char __initdata command_line[COMMAND_LINE_SIZE];
/*
* The format of "screen_info" is strange, and due to early
* i386-setup code. This is just enough to make the console
* code think we're on a VGA color display.
*/
/* 80x25 "VGA" defaults; see the comment above this definition. */
struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};

EXPORT_SYMBOL(screen_info);
/*
* The direct map I/O window, if any. This should be the same
* for all busses, since it's used by virt_to_bus.
*/
unsigned long __direct_map_base;
unsigned long __direct_map_size;
EXPORT_SYMBOL(__direct_map_base);
EXPORT_SYMBOL(__direct_map_size);
/*
* Declare all of the machine vectors.
*/
/* GCC 2.7.2 (on alpha at least) is lame. It does not support either
__attribute__((weak)) or #pragma weak. Bypass it and talk directly
to the assembler. */
#define WEAK(X) \
	extern struct alpha_machine_vector X; \
	asm(".weak "#X)

/* One weak machine-vector symbol per supported platform; vectors for
 * platforms not built into this kernel resolve to zero, which the
 * generic probing code can detect. */
WEAK(alcor_mv);
WEAK(alphabook1_mv);
WEAK(avanti_mv);
WEAK(cabriolet_mv);
WEAK(clipper_mv);
WEAK(dp264_mv);
WEAK(eb164_mv);
WEAK(eb64p_mv);
WEAK(eb66_mv);
WEAK(eb66p_mv);
WEAK(eiger_mv);
WEAK(jensen_mv);
WEAK(lx164_mv);
WEAK(lynx_mv);
WEAK(marvel_ev7_mv);
WEAK(miata_mv);
WEAK(mikasa_mv);
WEAK(mikasa_primo_mv);
WEAK(monet_mv);
WEAK(nautilus_mv);
WEAK(noname_mv);
WEAK(noritake_mv);
WEAK(noritake_primo_mv);
WEAK(p2k_mv);
WEAK(pc164_mv);
WEAK(privateer_mv);
WEAK(rawhide_mv);
WEAK(ruffian_mv);
WEAK(rx164_mv);
WEAK(sable_mv);
WEAK(sable_gamma_mv);
WEAK(shark_mv);
WEAK(sx164_mv);
WEAK(takara_mv);
WEAK(titan_mv);
WEAK(webbrick_mv);
WEAK(wildfire_mv);
WEAK(xl_mv);
WEAK(xlt_mv);

#undef WEAK
/*
* I/O resources inherited from PeeCees. Except for perhaps the
* turbochannel alphas, everyone has these on some sort of SuperIO chip.
*
* ??? If this becomes less standard, move the struct out into the
* machine vector.
*/
/* Claim the legacy PC-style I/O ports (RTC, PICs, DMA controllers, timer,
 * keyboard) in the primary hose's I/O space so drivers cannot
 * double-allocate them. The RTC range is patched at runtime because the
 * Jensen places it at a nonstandard port.
 */
static void __init
reserve_std_resources(void)
{
	static struct resource standard_io_resources[] = {
		{ .name = "rtc", .start = -1, .end = -1 },
        	{ .name = "dma1", .start = 0x00, .end = 0x1f },
        	{ .name = "pic1", .start = 0x20, .end = 0x3f },
        	{ .name = "timer", .start = 0x40, .end = 0x5f },
        	{ .name = "keyboard", .start = 0x60, .end = 0x6f },
        	{ .name = "dma page reg", .start = 0x80, .end = 0x8f },
        	{ .name = "pic2", .start = 0xa0, .end = 0xbf },
        	{ .name = "dma2", .start = 0xc0, .end = 0xdf },
	};

	struct resource *io = &ioport_resource;
	size_t i;

	if (hose_head) {
		struct pci_controller *hose;
		/* Prefer the I/O space of PCI hose 0, if present. */
		for (hose = hose_head; hose; hose = hose->next)
			if (hose->index == 0) {
				io = hose->io_space;
				break;
			}
	}

	/* Fix up for the Jensen's queer RTC placement. */
	standard_io_resources[0].start = RTC_PORT(0);
	standard_io_resources[0].end = RTC_PORT(0) + 0x10;

	for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
		request_resource(io, standard_io_resources+i);
}
/* PFN corresponding to 2GB. */
#define PFN_MAX		PFN_DOWN(0x80000000)

/* Iterate over the memory clusters described by an HWRPB memory
 * descriptor table. */
#define for_each_mem_cluster(memdesc, _cluster, i)		\
	for ((_cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
/* Parse a "mem=" style size string with an optional K/M/G suffix and
 * return it as a page frame number (the PFN of the limit).
 */
static unsigned long __init
get_mem_size_limit(char *s)
{
	char *p = s;
	unsigned long bytes = simple_strtoul(p, &p, 0);

	switch (*p) {
	case 'K': case 'k':
		bytes <<= 10;
		p++;
		break;
	case 'M': case 'm':
		bytes <<= 20;
		p++;
		break;
	case 'G': case 'g':
		bytes <<= 30;
		p++;
		break;
	}
	return bytes >> PAGE_SHIFT; /* Return the PFN of the limit. */
}
#ifdef CONFIG_BLK_DEV_INITRD
/* Relocate the initial ramdisk into bootmem below @mem_limit (a PFN).
 * Returns the new address, or NULL (after zeroing initrd_start/end so
 * the initrd is abandoned) when no suitably low memory is available.
 */
void * __init
move_initrd(unsigned long mem_limit)
{
	void *start;
	unsigned long size;

	size = initrd_end - initrd_start;
	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
	if (!start || __pa(start) + size > mem_limit) {
		initrd_start = initrd_end = 0;
		return NULL;
	}
	memmove(start, (void *)initrd_start, size);
	initrd_start = (unsigned long)start;
	initrd_end = initrd_start + size;
	printk("initrd moved to %p\n", start);
	return start;
}
#endif
#ifndef CONFIG_DISCONTIGMEM
/*
 * Scan the HWRPB memory clusters, choose a bootmap location that avoids
 * the kernel image, and hand every usable page to the bootmem allocator.
 * Also places/reserves the initrd when configured.
 */
static void __init
setup_memory(void *kernel_end)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long start_kernel_pfn, end_kernel_pfn;
	unsigned long bootmap_size, bootmap_pages, bootmap_start;
	unsigned long start, end;
	unsigned long i;

	/* Find free clusters, and init and free the bootmem accordingly. */
	memdesc = (struct memdesc_struct *)
	  (hwrpb->mddt_offset + (unsigned long) hwrpb);

	/* First pass: compute max_low_pfn from the usable clusters. */
	for_each_mem_cluster(memdesc, cluster, i) {
		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		/* Bit 0 is console/PALcode reserved.  Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later.  */
		if (cluster->usage & 3)
			continue;

		end = cluster->start_pfn + cluster->numpages;
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	/*
	 * Except for the NUMA systems (wildfire, marvel) all of the
	 * Alpha systems we run on support 32GB of memory or less.
	 * Since the NUMA systems introduce large holes in memory addressing,
	 * we can get into a situation where there is not enough contiguous
	 * memory for the memory map.
	 *
	 * Limit memory to the first 32GB to limit the NUMA systems to
	 * memory on their first node (wildfire) or 2 (marvel) to avoid
	 * not being able to produce the memory map.  In order to access
	 * all of the memory on the NUMA systems, build with discontiguous
	 * memory support.
	 *
	 * If the user specified a memory limit, let that memory limit stand.
	 */
	if (!mem_size_limit)
		mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;

	if (mem_size_limit && max_low_pfn >= mem_size_limit)
	{
		printk("setup: forcing memory size to %ldK (from %ldK).\n",
		       mem_size_limit << (PAGE_SHIFT - 10),
		       max_low_pfn    << (PAGE_SHIFT - 10));
		max_low_pfn = mem_size_limit;
	}

	/* Find the bounds of kernel memory.  */
	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
	bootmap_start = -1;

 try_again:
	if (max_low_pfn <= end_kernel_pfn)
		panic("not enough memory to boot");

	/* We need to know how many physically contiguous pages
	   we'll need for the bootmap.  */
	bootmap_pages = bootmem_bootmap_pages(max_low_pfn);

	/* Now find a good region where to allocate the bootmap.  */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;
		if (start >= max_low_pfn)
			continue;
		if (end > max_low_pfn)
			end = max_low_pfn;
		/* Carve the kernel image out of the candidate range. */
		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn
			    && end - end_kernel_pfn >= bootmap_pages) {
				bootmap_start = end_kernel_pfn;
				break;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (end - start >= bootmap_pages) {
			bootmap_start = start;
			break;
		}
	}

	/* No usable gap was found: halve the memory limit and retry. */
	if (bootmap_start == ~0UL) {
		max_low_pfn >>= 1;
		goto try_again;
	}

	/* Allocate the bootmap and mark the whole MM as reserved.  */
	bootmap_size = init_bootmem(bootmap_start, max_low_pfn);

	/* Mark the free regions.  */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = cluster->start_pfn + cluster->numpages;
		if (start >= max_low_pfn)
			continue;
		if (end > max_low_pfn)
			end = max_low_pfn;
		/* Again clip the range around the kernel image. */
		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn) {
				free_bootmem(PFN_PHYS(start),
					     (PFN_PHYS(start_kernel_pfn)
					      - PFN_PHYS(start)));
				printk("freeing pages %ld:%ld\n",
				       start, start_kernel_pfn);
				start = end_kernel_pfn;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (start >= end)
			continue;

		free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
		printk("freeing pages %ld:%ld\n", start, end);
	}

	/* Reserve the bootmap memory.  */
	reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
			BOOTMEM_DEFAULT);
	printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		initrd_end = initrd_start+INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		/* Move (or disable) the initrd if it lies above usable RAM. */
		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			reserve_bootmem(virt_to_phys((void *)initrd_start),
					INITRD_SIZE, BOOTMEM_DEFAULT);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
#else
extern void setup_memory(void *);
#endif /* !CONFIG_DISCONTIGMEM */
/*
 * Report whether @pfn lies in a usable (non-reserved) HWRPB memory
 * cluster.  Returns 1 for usable RAM, 0 otherwise.
 */
int __init
page_is_ram(unsigned long pfn)
{
	struct memdesc_struct *md;
	struct memclust_struct *cl;
	unsigned long idx;

	md = (struct memdesc_struct *)
		((unsigned long) hwrpb + hwrpb->mddt_offset);

	for_each_mem_cluster(md, cl, idx)
	{
		if (pfn < cl->start_pfn)
			continue;
		if (pfn >= cl->start_pfn + cl->numpages)
			continue;
		/* Usage bits 0/1 flag reserved / non-volatile clusters. */
		return !(cl->usage & 3);
	}

	return 0;
}
/*
 * Allocate and register a struct cpu for every possible CPU so they
 * appear in the CPU device hierarchy.
 */
static int __init
register_cpus(void)
{
	int cpu_nr;

	for_each_possible_cpu(cpu_nr) {
		struct cpu *info = kzalloc(sizeof(*info), GFP_KERNEL);

		if (info == NULL)
			return -ENOMEM;
		register_cpu(info, cpu_nr);
	}

	return 0;
}

arch_initcall(register_cpus);
/*
 * Main boot-time architecture setup: map the HWRPB, parse the boot
 * command line, select the machine vector, size memory and caches,
 * then hand off to paging_init().
 */
void __init
setup_arch(char **cmdline_p)
{
	extern char _end[];

	struct alpha_machine_vector *vec = NULL;
	struct percpu_struct *cpu;
	char *type_name, *var_name, *p;
	void *kernel_end = _end; /* end of kernel */
	char *args = command_line;

	hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
	boot_cpuid = hard_smp_processor_id();

	/*
	 * Pre-process the system type to make sure it will be valid.
	 *
	 * This may restore real CABRIO and EB66+ family names, ie
	 * EB64+ and EB66.
	 *
	 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
	 * and AS1200 (DIGITAL Server 5000 series) have the type as
	 * the negative of the real one.
	 */
	if ((long)hwrpb->sys_type < 0) {
		hwrpb->sys_type = -((long)hwrpb->sys_type);
		hwrpb_update_checksum(hwrpb);
	}

	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
			&alpha_panic_block);

#ifdef CONFIG_ALPHA_GENERIC
	/* Assume that we've booted from SRM if we haven't booted from MILO.
	   Detect the later by looking for "MILO" in the system serial nr.  */
	alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
#endif

	/* If we are using SRM, we want to allow callbacks
	   as early as possible, so do this NOW, and then
	   they should work immediately thereafter.
	*/
	kernel_end = callback_init(kernel_end);

	/*
	 * Locate the command line.
	 */
	/* Hack for Jensen... since we're restricted to 8 or 16 chars for
	   boot flags depending on the boot mode, we need some shorthand.
	   This should do for installation.  */
	if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
		strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
	} else {
		strlcpy(command_line, COMMAND_LINE, sizeof command_line);
	}
	strcpy(boot_command_line, command_line);
	*cmdline_p = command_line;

	/*
	 * Process command-line arguments.
	 */
	while ((p = strsep(&args, " \t")) != NULL) {
		if (!*p) continue;
		if (strncmp(p, "alpha_mv=", 9) == 0) {
			/* Explicit machine vector override by name. */
			vec = get_sysvec_byname(p+9);
			continue;
		}
		if (strncmp(p, "cycle=", 6) == 0) {
			est_cycle_freq = simple_strtol(p+6, NULL, 0);
			continue;
		}
		if (strncmp(p, "mem=", 4) == 0) {
			mem_size_limit = get_mem_size_limit(p+4);
			continue;
		}
		if (strncmp(p, "srmcons", 7) == 0) {
			srmcons_output |= 1;
			continue;
		}
		if (strncmp(p, "console=srm", 11) == 0) {
			srmcons_output |= 2;
			continue;
		}
		if (strncmp(p, "gartsize=", 9) == 0) {
			alpha_agpgart_size =
				get_mem_size_limit(p+9) << PAGE_SHIFT;
			continue;
		}
#ifdef CONFIG_VERBOSE_MCHECK
		if (strncmp(p, "verbose_mcheck=", 15) == 0) {
			alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
			continue;
		}
#endif
	}

	/* Replace the command line, now that we've killed it with strsep.  */
	strcpy(command_line, boot_command_line);

	/* If we want SRM console printk echoing early, do it now. */
	if (alpha_using_srm && srmcons_output) {
		register_srm_console();

		/*
		 * If "console=srm" was specified, clear the srmcons_output
		 * flag now so that time.c won't unregister_srm_console
		 */
		if (srmcons_output & 2)
			srmcons_output = 0;
	}

#ifdef CONFIG_MAGIC_SYSRQ
	/* If we're using SRM, make sysrq-b halt back to the prom,
	   not auto-reboot.  */
	if (alpha_using_srm) {
		struct sysrq_key_op *op = __sysrq_get_key_op('b');
		op->handler = (void *) machine_halt;
	}
#endif

	/*
	 * Identify and reconfigure for the current system.
	 */
	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &type_name, &var_name);
	if (*var_name == '0')
		var_name = "";

	/* Fall back to table lookup when no alpha_mv= override was given. */
	if (!vec) {
		vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
				 cpu->type);
	}

	if (!vec) {
		panic("Unsupported system type: %s%s%s (%ld %ld)\n",
		      type_name, (*var_name ? " variation " : ""), var_name,
		      hwrpb->sys_type, hwrpb->sys_variation);
	}
	if (vec != &alpha_mv) {
		alpha_mv = *vec;
	}

	printk("Booting "
#ifdef CONFIG_ALPHA_GENERIC
	       "GENERIC "
#endif
	       "on %s%s%s using machine vector %s from %s\n",
	       type_name, (*var_name ? " variation " : ""),
	       var_name, alpha_mv.vector_name,
	       (alpha_using_srm ? "SRM" : "MILO"));

	printk("Major Options: "
#ifdef CONFIG_SMP
	       "SMP "
#endif
#ifdef CONFIG_ALPHA_EV56
	       "EV56 "
#endif
#ifdef CONFIG_ALPHA_EV67
	       "EV67 "
#endif
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
	       "LEGACY_START "
#endif
#ifdef CONFIG_VERBOSE_MCHECK
	       "VERBOSE_MCHECK "
#endif

#ifdef CONFIG_DISCONTIGMEM
	       "DISCONTIGMEM "
#ifdef CONFIG_NUMA
	       "NUMA "
#endif
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
	       "DEBUG_SPINLOCK "
#endif
#ifdef CONFIG_MAGIC_SYSRQ
	       "MAGIC_SYSRQ "
#endif
	       "\n");

	printk("Command line: %s\n", command_line);

	/*
	 * Sync up the HAE.
	 * Save the SRM's current value for restoration.
	 */
	srm_hae = *alpha_mv.hae_register;
	__set_hae(alpha_mv.hae_cache);

	/* Reset enable correctable error reports.  */
	wrmces(0x7);

	/* Find our memory.  */
	setup_memory(kernel_end);

	/* First guess at cpu cache sizes.  Do this before init_arch.  */
	determine_cpu_caches(cpu->type);

	/* Initialize the machine.  Usually has to do with setting up
	   DMA windows and the like.  */
	if (alpha_mv.init_arch)
		alpha_mv.init_arch();

	/* Reserve standard resources.  */
	reserve_std_resources();

	/*
	 * Give us a default console.  TGA users will see nothing until
	 * chr_dev_init is called, rather late in the boot sequence.
	 */

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* Default root filesystem to sda2.  */
	ROOT_DEV = Root_SDA2;

#ifdef CONFIG_EISA
	/* FIXME:  only set this when we actually have EISA in this box? */
	EISA_bus = 1;
#endif

	/*
	 * Check ASN in HWRPB for validity, report if bad.
	 * FIXME: how was this failing?  Should we trust it instead,
	 * and copy the value into alpha_mv.max_asn?
	 */
	if (hwrpb->max_asn != MAX_ASN) {
		printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
	}

	/*
	 * Identify the flock of penguins.
	 */
#ifdef CONFIG_SMP
	setup_smp();
#endif
	paging_init();
}
/*
 * System/variation name tables.  The *_names arrays are indexed by the
 * values stored in the matching *_indices arrays, which in turn are
 * indexed by the member-ID bit-field of the HWRPB variation word.
 */
static char sys_unknown[] = "Unknown";
static char systype_names[][16] = {
	"0",
	"ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
	"Pelican", "Morgan", "Sable", "Medulla", "Noname",
	"Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
	"Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
	"Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
	"Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
	"Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
};

static char unofficial_names[][8] = {"100", "Ruffian"};

static char api_names[][16] = {"200", "Nautilus"};

static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};

static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};

static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
static int eb64p_indices[] = {0,0,1,2};

static char eb66_names[][8] = {"EB66", "EB66+"};
static int eb66_indices[] = {0,0,1};

static char marvel_names[][16] = {
	"Marvel/EV7"
};
static int marvel_indices[] = { 0 };

static char rawhide_names[][16] = {
	"Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
};
static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};

static char titan_names[][16] = {
	"DEFAULT", "Privateer", "Falcon", "Granite"
};
static int titan_indices[] = {0,1,2,2,3};

static char tsunami_names[][16] = {
	"0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
	"Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
	"Flying Clipper", "Shark"
};
static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
/*
 * Map an HWRPB (type, variation, cpu) triple to a machine vector.
 * First the flat system-type tables are consulted; failing that, the
 * per-family variation tables keyed by the member-ID bit-field.
 * Returns NULL when no vector is known for the system.
 */
static struct alpha_machine_vector * __init
get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
{
	static struct alpha_machine_vector *systype_vecs[] __initdata =
	{
		NULL,		/* 0 */
		NULL,		/* ADU */
		NULL,		/* Cobra */
		NULL,		/* Ruby */
		NULL,		/* Flamingo */
		NULL,		/* Mannequin */
		&jensen_mv,
		NULL,		/* Pelican */
		NULL,		/* Morgan */
		NULL,		/* Sable -- see below.  */
		NULL,		/* Medulla */
		&noname_mv,
		NULL,		/* Turbolaser */
		&avanti_mv,
		NULL,		/* Mustang */
		NULL,		/* Alcor, Bret, Maverick. HWRPB inaccurate? */
		NULL,		/* Tradewind */
		NULL,		/* Mikasa -- see below.  */
		NULL,		/* EB64 */
		NULL,		/* EB66 -- see variation.  */
		NULL,		/* EB64+ -- see variation.  */
		&alphabook1_mv,
		&rawhide_mv,
		NULL,		/* K2 */
		&lynx_mv,	/* Lynx */
		&xl_mv,
		NULL,		/* EB164 -- see variation.  */
		NULL,		/* Noritake -- see below.  */
		NULL,		/* Cortex */
		NULL,		/* 29 */
		&miata_mv,
		NULL,		/* XXM */
		&takara_mv,
		NULL,		/* Yukon */
		NULL,		/* Tsunami -- see variation.  */
		&wildfire_mv,	/* Wildfire */
		NULL,		/* CUSCO */
		&eiger_mv,	/* Eiger */
		NULL,		/* Titan */
		NULL,		/* Marvel */
	};

	static struct alpha_machine_vector *unofficial_vecs[] __initdata =
	{
		NULL,		/* 100 */
		&ruffian_mv,
	};

	static struct alpha_machine_vector *api_vecs[] __initdata =
	{
		NULL,		/* 200 */
		&nautilus_mv,
	};

	static struct alpha_machine_vector *alcor_vecs[] __initdata =
	{
		&alcor_mv, &xlt_mv, &xlt_mv
	};

	static struct alpha_machine_vector *eb164_vecs[] __initdata =
	{
		&eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
	};

	static struct alpha_machine_vector *eb64p_vecs[] __initdata =
	{
		&eb64p_mv,
		&cabriolet_mv,
		&cabriolet_mv /* AlphaPCI64 */
	};

	static struct alpha_machine_vector *eb66_vecs[] __initdata =
	{
		&eb66_mv,
		&eb66p_mv
	};

	static struct alpha_machine_vector *marvel_vecs[] __initdata =
	{
		&marvel_ev7_mv,
	};

	static struct alpha_machine_vector *titan_vecs[] __initdata =
	{
		&titan_mv,		/* default   */
		&privateer_mv,		/* privateer */
		&titan_mv,		/* falcon    */
		&privateer_mv,		/* granite   */
	};

	static struct alpha_machine_vector *tsunami_vecs[]  __initdata =
	{
		NULL,
		&dp264_mv,		/* dp264 */
		&dp264_mv,		/* warhol */
		&dp264_mv,		/* windjammer */
		&monet_mv,		/* monet */
		&clipper_mv,		/* clipper */
		&dp264_mv,		/* goldrush */
		&webbrick_mv,		/* webbrick */
		&dp264_mv,		/* catamaran */
		NULL,			/* brisbane? */
		NULL,			/* melbourne? */
		NULL,			/* flying clipper? */
		&shark_mv,		/* shark */
	};

	/* ??? Do we need to distinguish between Rawhides?  */

	struct alpha_machine_vector *vec;

	/* Search the system tables first... */
	vec = NULL;
	if (type < ARRAY_SIZE(systype_vecs)) {
		vec = systype_vecs[type];
	} else if ((type > ST_API_BIAS) &&
		   (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
		vec = api_vecs[type - ST_API_BIAS];
	} else if ((type > ST_UNOFFICIAL_BIAS) &&
		   (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
		vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
	}

	/* If we've not found one, try for a variation.  */

	if (!vec) {
		/* Member ID is a bit-field. */
		unsigned long member = (variation >> 10) & 0x3f;

		cpu &= 0xffffffff; /* make it usable */

		switch (type) {
		case ST_DEC_ALCOR:
			if (member < ARRAY_SIZE(alcor_indices))
				vec = alcor_vecs[alcor_indices[member]];
			break;
		case ST_DEC_EB164:
			if (member < ARRAY_SIZE(eb164_indices))
				vec = eb164_vecs[eb164_indices[member]];
			/* PC164 may show as EB164 variation with EV56 CPU,
			   but, since no true EB164 had anything but EV5... */
			if (vec == &eb164_mv && cpu == EV56_CPU)
				vec = &pc164_mv;
			break;
		case ST_DEC_EB64P:
			if (member < ARRAY_SIZE(eb64p_indices))
				vec = eb64p_vecs[eb64p_indices[member]];
			break;
		case ST_DEC_EB66:
			if (member < ARRAY_SIZE(eb66_indices))
				vec = eb66_vecs[eb66_indices[member]];
			break;
		case ST_DEC_MARVEL:
			if (member < ARRAY_SIZE(marvel_indices))
				vec = marvel_vecs[marvel_indices[member]];
			break;
		case ST_DEC_TITAN:
			vec = titan_vecs[0];	/* default */
			if (member < ARRAY_SIZE(titan_indices))
				vec = titan_vecs[titan_indices[member]];
			break;
		case ST_DEC_TSUNAMI:
			if (member < ARRAY_SIZE(tsunami_indices))
				vec = tsunami_vecs[tsunami_indices[member]];
			break;
		case ST_DEC_1000:
			/* These three families are distinguished by CPU
			   generation rather than variation. */
			if (cpu == EV5_CPU || cpu == EV56_CPU)
				vec = &mikasa_primo_mv;
			else
				vec = &mikasa_mv;
			break;
		case ST_DEC_NORITAKE:
			if (cpu == EV5_CPU || cpu == EV56_CPU)
				vec = &noritake_primo_mv;
			else
				vec = &noritake_mv;
			break;
		case ST_DEC_2100_A500:
			if (cpu == EV5_CPU || cpu == EV56_CPU)
				vec = &sable_gamma_mv;
			else
				vec = &sable_mv;
			break;
		}
	}
	return vec;
}
/*
 * Look up a machine vector by its vector_name (case-insensitive).
 * Used for the "alpha_mv=" boot argument.  Returns NULL if unknown.
 */
static struct alpha_machine_vector * __init
get_sysvec_byname(const char *name)
{
	/* Every machine vector we can be asked for by name. */
	static struct alpha_machine_vector *all_vecs[] __initdata =
	{
		&alcor_mv,
		&alphabook1_mv,
		&avanti_mv,
		&cabriolet_mv,
		&clipper_mv,
		&dp264_mv,
		&eb164_mv,
		&eb64p_mv,
		&eb66_mv,
		&eb66p_mv,
		&eiger_mv,
		&jensen_mv,
		&lx164_mv,
		&lynx_mv,
		&miata_mv,
		&mikasa_mv,
		&mikasa_primo_mv,
		&monet_mv,
		&nautilus_mv,
		&noname_mv,
		&noritake_mv,
		&noritake_primo_mv,
		&p2k_mv,
		&pc164_mv,
		&privateer_mv,
		&rawhide_mv,
		&ruffian_mv,
		&rx164_mv,
		&sable_mv,
		&sable_gamma_mv,
		&shark_mv,
		&sx164_mv,
		&takara_mv,
		&webbrick_mv,
		&wildfire_mv,
		&xl_mv,
		&xlt_mv
	};

	struct alpha_machine_vector **mvp;
	struct alpha_machine_vector **stop = all_vecs + ARRAY_SIZE(all_vecs);

	for (mvp = all_vecs; mvp < stop; ++mvp)
		if (strcasecmp((*mvp)->vector_name, name) == 0)
			return *mvp;

	return NULL;
}
/*
 * Translate the HWRPB (type, variation, cpu) triple into printable
 * family and variation name strings, using the static name tables.
 * Unknown systems yield "Unknown"; variation defaults to "0".
 */
static void
get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
	     char **type_name, char **variation_name)
{
	unsigned long member;

	/* If not in the tables, make it UNKNOWN,
	   else set type name to family */
	if (type < ARRAY_SIZE(systype_names)) {
		*type_name = systype_names[type];
	} else if ((type > ST_API_BIAS) &&
		   (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
		*type_name = api_names[type - ST_API_BIAS];
	} else if ((type > ST_UNOFFICIAL_BIAS) &&
		   (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
		*type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
	} else {
		*type_name = sys_unknown;
		*variation_name = sys_unknown;
		return;
	}

	/* Set variation to "0"; if variation is zero, done.  */
	*variation_name = systype_names[0];
	if (variation == 0) {
		return;
	}

	member = (variation >> 10) & 0x3f; /* member ID is a bit-field */

	cpu &= 0xffffffff; /* make it usable */

	switch (type) { /* select by family */
	default: /* default to variation "0" for now */
		break;
	case ST_DEC_EB164:
		if (member < ARRAY_SIZE(eb164_indices))
			*variation_name = eb164_names[eb164_indices[member]];
		/* PC164 may show as EB164 variation, but with EV56 CPU,
		   so, since no true EB164 had anything but EV5...  */
		if (eb164_indices[member] == 0 && cpu == EV56_CPU)
			*variation_name = eb164_names[1]; /* make it PC164 */
		break;
	case ST_DEC_ALCOR:
		if (member < ARRAY_SIZE(alcor_indices))
			*variation_name = alcor_names[alcor_indices[member]];
		break;
	case ST_DEC_EB64P:
		if (member < ARRAY_SIZE(eb64p_indices))
			*variation_name = eb64p_names[eb64p_indices[member]];
		break;
	case ST_DEC_EB66:
		if (member < ARRAY_SIZE(eb66_indices))
			*variation_name = eb66_names[eb66_indices[member]];
		break;
	case ST_DEC_MARVEL:
		if (member < ARRAY_SIZE(marvel_indices))
			*variation_name = marvel_names[marvel_indices[member]];
		break;
	case ST_DEC_RAWHIDE:
		if (member < ARRAY_SIZE(rawhide_indices))
			*variation_name = rawhide_names[rawhide_indices[member]];
		break;
	case ST_DEC_TITAN:
		*variation_name = titan_names[0];	/* default */
		if (member < ARRAY_SIZE(titan_indices))
			*variation_name = titan_names[titan_indices[member]];
		break;
	case ST_DEC_TSUNAMI:
		if (member < ARRAY_SIZE(tsunami_indices))
			*variation_name = tsunami_names[tsunami_indices[member]];
		break;
	}
}
/*
* A change was made to the HWRPB via an ECO and the following code
* tracks a part of the ECO. In HWRPB versions less than 5, the ECO
* was not implemented in the console firmware. If it's revision 5 or
* greater we can get the name of the platform as an ASCII string from
* the HWRPB. That's what this function does. It checks the revision
* level and if the string is in the HWRPB it returns the address of
* the string--a pointer to the name of the platform.
*
* Returns:
* - Pointer to a ASCII string if it's in the HWRPB
* - Pointer to a blank string if the data is not in the HWRPB.
*/
/*
 * Return the platform name string stored in the HWRPB's Dynamic System
 * Recognition (DSR) block, or "N/A" when the HWRPB revision predates
 * the DSR (revisions below 5).
 */
static char *
platform_string(void)
{
	static char unk_system_string[] = "N/A";
	struct dsr_struct *dsr;

	/* Pre-revision-5 HWRPBs have no DSR data at all. */
	if (hwrpb->revision < 5)
		return unk_system_string;

	/* The name string follows its length count inside the DSR. */
	dsr = (struct dsr_struct *)((char *)hwrpb + hwrpb->dsr_offset);
	return (char *)dsr + dsr->sysname_off + sizeof(long);
}
/*
 * Count the per-CPU slots (out of @num, starting at @cpubase) whose
 * flag bits 0x1cc are all set, i.e. the processors actually present.
 */
static int
get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
{
	int found = 0;
	unsigned long slot;

	for (slot = 0; slot < num; slot++) {
		struct percpu_struct *pcpu = (struct percpu_struct *)
			((char *)cpubase + slot * hwrpb->processor_size);

		if ((pcpu->flags & 0x1cc) == 0x1cc)
			found++;
	}

	return found;
}
/*
 * Print one cache line of /proc/cpuinfo.  @shape is a CSHAPE() value:
 * -1 means the cache doesn't exist, 0 means its size is unknown.
 */
static void
show_cache_size (struct seq_file *f, const char *which, int shape)
{
	switch (shape) {
	case -1:
		seq_printf (f, "%s\t\t: n/a\n", which);
		break;
	case 0:
		seq_printf (f, "%s\t\t: unknown\n", which);
		break;
	default:
		/* Decode CSHAPE: size | log2(line) << 4 | associativity. */
		seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
			    which, shape >> 10, shape & 15,
			    1 << ((shape >> 4) & 15));
		break;
	}
}
/*
 * /proc/cpuinfo show routine: report CPU model, system identity,
 * frequencies, unaligned-access counters and cache shapes for the
 * single CPU record passed in @slot.
 */
static int
show_cpuinfo(struct seq_file *f, void *slot)
{
	extern struct unaligned_stat {
		unsigned long count, va, pc;
	} unaligned[2];

	static char cpu_names[][8] = {
		"EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
		"EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
		"EV68CX", "EV7", "EV79", "EV69"
	};

	struct percpu_struct *cpu = slot;
	unsigned int cpu_index;
	char *cpu_name;
	char *systype_name;
	char *sysvariation_name;
	int nr_processors;

	/* cpu->type is 1-based; convert to a table index. */
	cpu_index = (unsigned) (cpu->type - 1);
	cpu_name = "Unknown";
	if (cpu_index < ARRAY_SIZE(cpu_names))
		cpu_name = cpu_names[cpu_index];

	get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
		     cpu->type, &systype_name, &sysvariation_name);

	nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);

	seq_printf(f, "cpu\t\t\t: Alpha\n"
		      "cpu model\t\t: %s\n"
		      "cpu variation\t\t: %ld\n"
		      "cpu revision\t\t: %ld\n"
		      "cpu serial number\t: %s\n"
		      "system type\t\t: %s\n"
		      "system variation\t: %s\n"
		      "system revision\t\t: %ld\n"
		      "system serial number\t: %s\n"
		      "cycle frequency [Hz]\t: %lu %s\n"
		      "timer frequency [Hz]\t: %lu.%02lu\n"
		      "page size [bytes]\t: %ld\n"
		      "phys. address bits\t: %ld\n"
		      "max. addr. space #\t: %ld\n"
		      "BogoMIPS\t\t: %lu.%02lu\n"
		      "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		      "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
		      "platform string\t\t: %s\n"
		      "cpus detected\t\t: %d\n",
		       cpu_name, cpu->variation, cpu->revision,
		       (char*)cpu->serial_no,
		       systype_name, sysvariation_name, hwrpb->sys_revision,
		       (char*)hwrpb->ssn,
		       est_cycle_freq ? : hwrpb->cycle_freq,
		       est_cycle_freq ? "est." : "",
		       hwrpb->intr_freq / 4096,
		       (100 * hwrpb->intr_freq / 4096) % 100,
		       hwrpb->pagesize,
		       hwrpb->pa_bits,
		       hwrpb->max_asn,
		       loops_per_jiffy / (500000/HZ),
		       (loops_per_jiffy / (5000/HZ)) % 100,
		       unaligned[0].count, unaligned[0].pc, unaligned[0].va,
		       unaligned[1].count, unaligned[1].pc, unaligned[1].va,
		       platform_string(), nr_processors);

#ifdef CONFIG_SMP
	seq_printf(f, "cpus active\t\t: %u\n"
		      "cpu active mask\t\t: %016lx\n",
		       num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
#endif

	show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
	show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
	show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
	show_cache_size (f, "L3 cache", alpha_l3_cacheshape);

	return 0;
}
/*
 * Time a chain of dependent loads over @size bytes at the given
 * @stride and return the cycle-counter delta divided by the number of
 * loads — i.e. an average per-load latency.  Used by the cache probe.
 * NOTE(review): relies on rpcc sampling before/after the loop; the
 * exact timer semantics come from the Alpha architecture — verify
 * against the architecture handbook if changing the asm.
 */
static int __init
read_mem_block(int *addr, int stride, int size)
{
	long nloads = size / stride, cnt, tmp;

	__asm__ __volatile__(
	" rpcc %0\n"
	"1: ldl %3,0(%2)\n"
	" subq %1,1,%1\n"
	/* Next two XORs introduce an explicit data dependency between
	   consecutive loads in the loop, which will give us true load
	   latency. */
	" xor %3,%2,%2\n"
	" xor %3,%2,%2\n"
	" addq %2,%4,%2\n"
	" bne %1,1b\n"
	" rpcc %3\n"
	" subl %3,%0,%0\n"
	: "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
	: "r" (stride), "1" (nloads), "2" (addr));

	return cnt / (size / stride);
}
/* Pack a cache descriptor: total size (low byte cleared) | log2(line
   size) in bits 4-7 | associativity in bits 0-3.  Decoded by
   show_cache_size(). */
#define CSHAPE(totalsize, linesize, assoc) \
  ((totalsize & ~0xff) | (linesize << 4) | assoc)

/* ??? EV5 supports up to 64M, but did the systems with more than
   16M of BCACHE ever exist? */
#define MAX_BCACHE_SIZE	16*1024*1024
/*
 * Empirically size the off-chip (direct-mapped) Bcache: keep doubling
 * the probed footprint until the average load latency more than
 * doubles, which marks the point where the cache was exceeded.
 * Returns a CSHAPE() descriptor for the detected cache, or -1 if none.
 */
static int __init
external_cache_probe(int minsize, int width)
{
	int cycles, prev_cycles = 1000000;
	int stride = 1 << width;
	long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;

	/* Never probe past the end of physical memory. */
	if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
		maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT);

	/* Get the first block cached. */
	read_mem_block(__va(0), stride, size);

	while (size < maxsize) {
		/* Get an average load latency in cycles. */
		cycles = read_mem_block(__va(0), stride, size);
		if (cycles > prev_cycles * 2) {
			/* Fine, we exceed the cache. */
			printk("%ldK Bcache detected; load hit latency %d "
			       "cycles, load miss latency %d cycles\n",
			       size >> 11, prev_cycles, cycles);
			/* The last size that still hit is size/2. */
			return CSHAPE(size >> 1, width, 1);
		}
		/* Try to get the next block cached. */
		read_mem_block(__va(size), stride, size);
		prev_cycles = cycles;
		size <<= 1;
	}
	return -1;	/* No BCACHE found. */
}
/*
 * Fill in the alpha_l{1i,1d,2,3}_cacheshape globals for the given CPU
 * type.  On-chip cache shapes are fixed per CPU generation; off-chip
 * caches are read from hardware registers where possible, otherwise
 * measured with external_cache_probe().
 */
static void __init
determine_cpu_caches (unsigned int cpu_type)
{
	int L1I, L1D, L2, L3;

	switch (cpu_type) {
	case EV4_CPU:
	case EV45_CPU:
	  {
		if (cpu_type == EV4_CPU)
			L1I = CSHAPE(8*1024, 5, 1);
		else
			L1I = CSHAPE(16*1024, 5, 1);
		L1D = L1I;
		L3 = -1;

		/* BIU_CTL is a write-only Abox register.  PALcode has a
		   shadow copy, and may be available from some versions
		   of the CSERVE PALcall.  If we can get it, then

			unsigned long biu_ctl, size;
			size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
			L2 = CSHAPE (size, 5, 1);

		   Unfortunately, we can't rely on that.
		*/
		L2 = external_cache_probe(128*1024, 5);
		break;
	  }

	case LCA4_CPU:
	  {
		unsigned long car, size;

		L1I = L1D = CSHAPE(8*1024, 5, 1);
		L3 = -1;

		car = *(vuip) phys_to_virt (0x120000078UL);
		size = 64*1024 * (1 << ((car >> 5) & 7));
		/* No typo -- 8 byte cacheline size.  Whodathunk.  */
		L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
		break;
	  }

	case EV5_CPU:
	case EV56_CPU:
	  {
		unsigned long sc_ctl, width;

		L1I = L1D = CSHAPE(8*1024, 5, 1);

		/* Check the line size of the Scache.  */
		sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
		width = sc_ctl & 0x1000 ? 6 : 5;
		L2 = CSHAPE (96*1024, width, 3);

		/* BC_CONTROL and BC_CONFIG are write-only IPRs.  PALcode
		   has a shadow copy, and may be available from some versions
		   of the CSERVE PALcall.  If we can get it, then

			unsigned long bc_control, bc_config, size;
			size = 1024*1024 * (1 << ((bc_config & 7) - 1));
			L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);

		   Unfortunately, we can't rely on that.
		*/
		L3 = external_cache_probe(1024*1024, width);
		break;
	  }

	case PCA56_CPU:
	case PCA57_CPU:
	  {
		if (cpu_type == PCA56_CPU) {
			L1I = CSHAPE(16*1024, 6, 1);
			L1D = CSHAPE(8*1024, 5, 1);
		} else {
			L1I = CSHAPE(32*1024, 6, 2);
			L1D = CSHAPE(16*1024, 5, 1);
		}
		L3 = -1;

#if 0
		unsigned long cbox_config, size;

		cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
		size = 512*1024 * (1 << ((cbox_config >> 12) & 3));

		L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
#else
		L2 = external_cache_probe(512*1024, 6);
#endif
		break;
	  }

	case EV6_CPU:
	case EV67_CPU:
	case EV68CB_CPU:
	case EV68AL_CPU:
	case EV68CX_CPU:
	case EV69_CPU:
		L1I = L1D = CSHAPE(64*1024, 6, 2);
		L2 = external_cache_probe(1024*1024, 6);
		L3 = -1;
		break;

	case EV7_CPU:
	case EV79_CPU:
		L1I = L1D = CSHAPE(64*1024, 6, 2);
		L2 = CSHAPE(7*1024*1024/4, 6, 7);
		L3 = -1;
		break;

	default:
		/* Nothing known about this cpu type.  */
		L1I = L1D = L2 = L3 = 0;
		break;
	}

	alpha_l1i_cacheshape = L1I;
	alpha_l1d_cacheshape = L1D;
	alpha_l2_cacheshape = L2;
	alpha_l3_cacheshape = L3;
}
/*
* We show only CPU #0 info.
*/
/*
 * seq_file start: yield the one and only record (CPU #0's per-cpu
 * slot) on the first call, then terminate the iteration.
 */
static void *
c_start(struct seq_file *f, loff_t *pos)
{
	if (*pos != 0)
		return NULL;
	return (char *)hwrpb + hwrpb->processor_offset;
}
/* Only one record is ever produced, so there is never a "next". */
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
	return NULL;
}
/* Nothing to release when the iteration ends. */
static void
c_stop(struct seq_file *f, void *v)
{
}
/* seq_file operations backing /proc/cpuinfo on Alpha. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
/*
 * Panic notifier: when running under SRM with the serial console in
 * use, hard-halt back into the firmware instead of spinning.
 */
static int
alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
#if 1
	/* FIXME FIXME FIXME */
	/* If we are using SRM and serial console, just hard halt here. */
	if (alpha_using_srm && srmcons_output)
		__halt();
#endif
	return NOTIFY_DONE;
}
static __init int add_pcspkr(void)
{
struct platform_device *pd;
int ret;
pd = platform_device_alloc("pcspkr", -1);
if (!pd)
return -ENOMEM;
ret = platform_device_add(pd);
if (ret)
platform_device_put(pd);
return ret;
}
device_initcall(add_pcspkr);
| gpl-2.0 |
Ander-Alvarez/CoffeeKernel | drivers/spi/spi-fsl-espi.c | 2205 | 18087 | /*
* Freescale eSPI controller driver.
*
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <sysdev/fsl_soc.h>
#include "spi-fsl-lib.h"
/* eSPI Controller registers — memory-mapped, big-endian (__be32). */
struct fsl_espi_reg {
	__be32 mode;		/* 0x000 - eSPI mode register */
	__be32 event;		/* 0x004 - eSPI event register */
	__be32 mask;		/* 0x008 - eSPI mask register */
	__be32 command;		/* 0x00c - eSPI command register */
	__be32 transmit;	/* 0x010 - eSPI transmit FIFO access register*/
	__be32 receive;		/* 0x014 - eSPI receive FIFO access register*/
	u8 res[8];		/* 0x018 - 0x01c reserved */
	__be32 csmode[4];	/* 0x020 - 0x02c eSPI cs mode register */
};
/*
 * Per-message bookkeeping used internally by this driver.
 * NOTE(review): field semantics inferred from names — verify against
 * the users of this struct further down the file.
 */
struct fsl_espi_transfer {
	const void *tx_buf;	/* transmit data buffer */
	void *rx_buf;		/* receive data buffer */
	unsigned len;		/* transfer length in bytes */
	unsigned n_tx;		/* tx byte count */
	unsigned n_rx;		/* rx byte count */
	unsigned actual_length;	/* bytes actually transferred */
	int status;		/* completion status */
};
/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		(1 << 31)
#define SPMODE_LOOP		(1 << 30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	(1 << 31)
#define CSMODE_CP_BEGIN_EDGECLK	(1 << 30)
#define CSMODE_REV		(1 << 29)
#define CSMODE_DIV16		(1 << 28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		(1 << 20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define	SPIE_NE		0x00000200	/* Not empty */
#define	SPIE_NF		0x00000100	/* Not full */

/* SPIM register values */
#define	SPIM_NE		0x00000200	/* Not empty */
#define	SPIM_NF		0x00000100	/* Not full */
#define SPIE_RXCNT(reg)     ((reg >> 24) & 0x3F)	/* RX FIFO count: bits 29:24 */
#define SPIE_TXCNT(reg)     ((reg >> 16) & 0x3F)	/* TX FIFO count: bits 21:16 */

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_TRANLEN(x)	((x) << 0)
#define	SPCOM_TRANLEN_MAX	0xFFFF	/* Max transaction length */
/*
 * Program the per-chip-select mode register for @spi.  The controller
 * is briefly disabled (SPMODE_ENABLE cleared) while the CS mode is
 * written, with local IRQs off to keep that window as short as
 * possible.
 *
 * Fix: the register-address expressions were corrupted by a character
 * encoding error ("®_base" is the mojibake of "&reg_base" via the
 * HTML entity "&reg;"); restore the address-of operator so the code
 * compiles again.
 */
static void fsl_espi_change_mode(struct spi_device *spi)
{
	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
	struct spi_mpc8xxx_cs *cs = spi->controller_state;
	struct fsl_espi_reg *reg_base = mspi->reg_base;
	__be32 __iomem *mode = &reg_base->csmode[spi->chip_select];
	__be32 __iomem *espi_mode = &reg_base->mode;
	u32 tmp;
	unsigned long flags;

	/* Turn off IRQs locally to minimize time that SPI is disabled. */
	local_irq_save(flags);

	/* Turn off SPI unit prior changing mode */
	tmp = mpc8xxx_spi_read_reg(espi_mode);
	mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE);
	mpc8xxx_spi_write_reg(mode, cs->hw_mode);
	mpc8xxx_spi_write_reg(espi_mode, tmp);

	local_irq_restore(flags);
}
/*
 * fsl_espi_tx_buf_lsb() - fetch the next TX word for SPI_LSB_FIRST
 * @mpc8xxx_spi: driver state holding the TX cursor and shift amount
 *
 * Reads one 32-bit word from the TX buffer, applies the configured
 * shift and byte-swaps each 16-bit half independently.  Returns 0
 * (dummy data) without advancing when there is no TX buffer.
 *
 * NOTE(review): data_h is OR'ed into the LOW 16 bits of the result,
 * discarding the upper half.  With the 4..16 bit word sizes this
 * driver supports (tx_shift 0), data_h is always 0 here -- confirm
 * before reusing this helper for wider words.
 */
static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
{
	u32 data;
	u16 data_h;
	u16 data_l;
	const u32 *tx = mpc8xxx_spi->tx;

	if (!tx)
		return 0;

	data = *tx++ << mpc8xxx_spi->tx_shift;
	data_l = data & 0xffff;
	data_h = (data >> 16) & 0xffff;
	swab16s(&data_l);
	swab16s(&data_h);
	data = data_h | data_l;

	mpc8xxx_spi->tx = tx;
	return data;
}
/*
 * fsl_espi_setup_transfer() - apply per-transfer word size and clock rate
 * @spi: target device
 * @t:   transfer whose parameters override the device defaults, or NULL
 *       to (re)apply the device's own bits_per_word / max_speed_hz
 *
 * Validates the word length (4..16 bits), selects the rx/tx word
 * accessors, computes the clock prescaler and writes the result to the
 * CS mode register via fsl_espi_change_mode().
 *
 * Return: 0 on success, -EINVAL for an unsupported word length.
 */
static int fsl_espi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
	int bits_per_word = 0;
	u8 pm;
	u32 hz = 0;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	if (t) {
		bits_per_word = t->bits_per_word;
		hz = t->speed_hz;
	}

	/* spi_transfer level calls that work per-word */
	if (!bits_per_word)
		bits_per_word = spi->bits_per_word;

	/* Make sure its a bit width we support [4..16] */
	if ((bits_per_word < 4) || (bits_per_word > 16))
		return -EINVAL;

	if (!hz)
		hz = spi->max_speed_hz;

	cs->rx_shift = 0;
	cs->tx_shift = 0;
	cs->get_rx = mpc8xxx_spi_rx_buf_u32;
	cs->get_tx = mpc8xxx_spi_tx_buf_u32;
	if (bits_per_word <= 8) {
		cs->rx_shift = 8 - bits_per_word;
	} else if (bits_per_word <= 16) {
		cs->rx_shift = 16 - bits_per_word;
		if (spi->mode & SPI_LSB_FIRST)
			cs->get_tx = fsl_espi_tx_buf_lsb;
	} else {
		/* unreachable: range-checked above */
		return -EINVAL;
	}

	mpc8xxx_spi->rx_shift = cs->rx_shift;
	mpc8xxx_spi->tx_shift = cs->tx_shift;
	mpc8xxx_spi->get_rx = cs->get_rx;
	mpc8xxx_spi->get_tx = cs->get_tx;

	/* the CSMODE_LEN field encodes bits-per-word minus one */
	bits_per_word = bits_per_word - 1;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word);

	if ((mpc8xxx_spi->spibrg / hz) > 64) {
		/* slow clock: engage the /16 pre-divider; pm caps at 33 */
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4);

		WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. "
			  "Will use %d Hz instead.\n", dev_name(&spi->dev),
			  hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1)));
		if (pm > 33)
			pm = 33;
	} else {
		pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4);
	}
	if (pm)
		pm--;
	/* NOTE(review): pm is clamped to a minimum of 2 even for fast
	 * clocks -- confirm this is a hardware requirement. */
	if (pm < 2)
		pm = 2;

	cs->hw_mode |= CSMODE_PM(pm);

	fsl_espi_change_mode(spi);
	return 0;
}
/*
 * fsl_espi_cpu_bufs() - prime a PIO transfer
 * @mspi: driver state
 * @t:    transfer being started (unused here; kept for symmetry)
 * @len:  transfer length in 32-bit words
 *
 * Records the word count, unmasks the RX interrupt and pushes the
 * first TX word; subsequent words are fed from fsl_espi_cpu_irq().
 * Always returns 0.
 */
static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t,
		unsigned int len)
{
	u32 word;
	struct fsl_espi_reg *reg_base = mspi->reg_base;

	mspi->count = len;

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);

	/* transmit word */
	word = mspi->get_tx(mspi);
	mpc8xxx_spi_write_reg(&reg_base->transmit, word);

	return 0;
}
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
unsigned int len = t->len;
u8 bits_per_word;
int ret;
bits_per_word = spi->bits_per_word;
if (t->bits_per_word)
bits_per_word = t->bits_per_word;
mpc8xxx_spi->len = t->len;
len = roundup(len, 4) / 4;
mpc8xxx_spi->tx = t->tx_buf;
mpc8xxx_spi->rx = t->rx_buf;
INIT_COMPLETION(mpc8xxx_spi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
" beyond the SPCOM[TRANLEN] field\n", t->len);
return -EINVAL;
}
mpc8xxx_spi_write_reg(®_base->command,
(SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));
ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len);
if (ret)
return ret;
wait_for_completion(&mpc8xxx_spi->done);
/* disable rx ints */
mpc8xxx_spi_write_reg(®_base->mask, 0);
return mpc8xxx_spi->count;
}
/*
 * Store a 24-bit address into bytes 1..3 of a command buffer
 * (big-endian order), leaving the opcode byte cmd[0] untouched.
 * A NULL buffer is silently ignored.
 */
static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd)
{
	int i;

	if (!cmd)
		return;

	for (i = 0; i < 3; i++)
		cmd[3 - i] = (u8)(addr >> (8 * i));
}
/*
 * Recover the 24-bit big-endian address stored in bytes 1..3 of a
 * command buffer.  Returns 0 for a NULL buffer.
 */
static inline unsigned int fsl_espi_cmd2addr(u8 *cmd)
{
	unsigned int addr = 0;

	if (cmd)
		addr = (cmd[1] << 16) | (cmd[2] << 8) | cmd[3];

	return addr;
}
/*
 * fsl_espi_do_trans() - execute one combined transfer for a message
 * @m:  original message, used for parameter validation only
 * @tr: combined transfer (bounce buffer + total length) to run
 *
 * All transfers of a message must agree on bits_per_word and speed_hz
 * because they are merged into a single hardware transaction; on a
 * mismatch tr->status is set to -EINVAL and nothing is sent.  The
 * outcome lands in tr->status (0 on success; any transfer failure is
 * reported as -EMSGSIZE).
 *
 * Fix: removed the dead store "status = -EINVAL;" that was immediately
 * overwritten by the fsl_espi_setup_transfer() return value.
 */
static void fsl_espi_do_trans(struct spi_message *m,
				struct fsl_espi_transfer *tr)
{
	struct spi_device *spi = m->spi;
	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
	struct fsl_espi_transfer *espi_trans = tr;
	struct spi_message message;
	struct spi_transfer *t, *first, trans;
	int status = 0;

	spi_message_init(&message);
	memset(&trans, 0, sizeof(trans));

	first = list_first_entry(&m->transfers, struct spi_transfer,
			transfer_list);
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if ((first->bits_per_word != t->bits_per_word) ||
			(first->speed_hz != t->speed_hz)) {
			espi_trans->status = -EINVAL;
			dev_err(mspi->dev, "bits_per_word/speed_hz should be"
					" same for the same SPI transfer\n");
			return;
		}

		trans.speed_hz = t->speed_hz;
		trans.bits_per_word = t->bits_per_word;
		trans.delay_usecs = max(first->delay_usecs, t->delay_usecs);
	}

	trans.len = espi_trans->len;
	trans.tx_buf = espi_trans->tx_buf;
	trans.rx_buf = espi_trans->rx_buf;
	spi_message_add_tail(&trans, &message);

	list_for_each_entry(t, &message.transfers, transfer_list) {
		if (t->bits_per_word || t->speed_hz) {
			status = fsl_espi_setup_transfer(spi, t);
			if (status < 0)
				break;
		}

		if (t->len)
			status = fsl_espi_bufs(spi, t);

		/* any residue or error is reported as -EMSGSIZE */
		if (status) {
			status = -EMSGSIZE;
			break;
		}

		if (t->delay_usecs)
			udelay(t->delay_usecs);
	}

	espi_trans->status = status;
	/* restore the device's default transfer parameters */
	fsl_espi_setup_transfer(spi, NULL);
}
/*
 * fsl_espi_cmd_trans() - TX-only message path
 * @m:       message whose tx_buf fragments are concatenated
 * @trans:   combined-transfer bookkeeping; status/actual_length updated
 * @rx_buff: unused (NULL on this path)
 *
 * Copies all tx_buf fragments into one bounce buffer and runs them as
 * a single transaction.
 *
 * NOTE(review): the fragments are copied without checking that their
 * total stays within the SPCOM_TRANLEN_MAX-byte bounce buffer; an
 * oversized message would overflow local_buf before fsl_espi_bufs()
 * rejects it -- confirm callers bound the message length.
 */
static void fsl_espi_cmd_trans(struct spi_message *m,
				struct fsl_espi_transfer *trans, u8 *rx_buff)
{
	struct spi_transfer *t;
	u8 *local_buf;
	int i = 0;
	struct fsl_espi_transfer *espi_trans = trans;

	local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
	if (!local_buf) {
		espi_trans->status = -ENOMEM;
		return;
	}

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->tx_buf) {
			memcpy(local_buf + i, t->tx_buf, t->len);
			i += t->len;
		}
	}

	espi_trans->tx_buf = local_buf;
	espi_trans->rx_buf = local_buf + espi_trans->n_tx;
	fsl_espi_do_trans(m, espi_trans);

	espi_trans->actual_length = espi_trans->len;
	kfree(local_buf);
}
/*
 * fsl_espi_rw_trans() - command + read path, chunked to fit SPCOM
 * @m:       original message (source of the command bytes)
 * @trans:   combined-transfer bookkeeping
 * @rx_buff: user receive buffer collecting all chunks
 *
 * Reads that do not fit into one hardware transaction (command + data
 * limited by SPCOM_TRANLEN_MAX) are split into chunks.  For every
 * chunk after the first, the 24-bit address embedded in cmd[1..3] is
 * advanced by the chunk offset -- this assumes a flash-style read
 * command layout (opcode + 3 address bytes); TODO confirm for other
 * slave types.
 */
static void fsl_espi_rw_trans(struct spi_message *m,
				struct fsl_espi_transfer *trans, u8 *rx_buff)
{
	struct fsl_espi_transfer *espi_trans = trans;
	unsigned int n_tx = espi_trans->n_tx;
	unsigned int n_rx = espi_trans->n_rx;
	struct spi_transfer *t;
	u8 *local_buf;
	u8 *rx_buf = rx_buff;
	unsigned int trans_len;
	unsigned int addr;
	int i, pos, loop;

	local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
	if (!local_buf) {
		espi_trans->status = -ENOMEM;
		return;
	}

	for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) {
		/* largest RX chunk that still fits after the command */
		trans_len = n_rx - pos;
		if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
			trans_len = SPCOM_TRANLEN_MAX - n_tx;

		/* rebuild the command bytes at the front of the buffer */
		i = 0;
		list_for_each_entry(t, &m->transfers, transfer_list) {
			if (t->tx_buf) {
				memcpy(local_buf + i, t->tx_buf, t->len);
				i += t->len;
			}
		}

		/* advance the embedded address for follow-up chunks */
		if (pos > 0) {
			addr = fsl_espi_cmd2addr(local_buf);
			addr += pos;
			fsl_espi_addr2cmd(addr, local_buf);
		}

		espi_trans->n_tx = n_tx;
		espi_trans->n_rx = trans_len;
		espi_trans->len = trans_len + n_tx;
		espi_trans->tx_buf = local_buf;
		espi_trans->rx_buf = local_buf + n_tx;
		fsl_espi_do_trans(m, espi_trans);

		memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);

		/* the command bytes are only counted once */
		if (loop > 0)
			espi_trans->actual_length += espi_trans->len - n_tx;
		else
			espi_trans->actual_length += espi_trans->len;
	}

	kfree(local_buf);
}
/*
 * fsl_espi_do_one_msg() - top-level message handler
 * @m: message queued by the SPI core
 *
 * Sums the TX and RX byte counts of all transfers, then dispatches to
 * the TX-only path (fsl_espi_cmd_trans) or the command+read path
 * (fsl_espi_rw_trans), and reports status/actual_length back through
 * the message.  Note that only the LAST transfer carrying an rx_buf is
 * used as the receive destination.
 */
static void fsl_espi_do_one_msg(struct spi_message *m)
{
	struct spi_transfer *t;
	u8 *rx_buf = NULL;
	unsigned int n_tx = 0;
	unsigned int n_rx = 0;
	struct fsl_espi_transfer espi_trans;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->tx_buf)
			n_tx += t->len;
		if (t->rx_buf) {
			n_rx += t->len;
			rx_buf = t->rx_buf;
		}
	}

	espi_trans.n_tx = n_tx;
	espi_trans.n_rx = n_rx;
	espi_trans.len = n_tx + n_rx;
	espi_trans.actual_length = 0;
	espi_trans.status = 0;

	if (!rx_buf)
		fsl_espi_cmd_trans(m, &espi_trans, NULL);
	else
		fsl_espi_rw_trans(m, &espi_trans, rx_buf);

	m->actual_length = espi_trans.actual_length;
	m->status = espi_trans.status;
	m->complete(m->context);
}
/*
 * fsl_espi_setup() - SPI core .setup callback
 * @spi: device being configured
 *
 * Allocates the per-device state on first use, translates SPI_CPHA,
 * SPI_CPOL, SPI_LSB_FIRST and SPI_LOOP into CS mode / global mode
 * register bits, and applies the device's default transfer parameters.
 *
 * Return: 0 on success or a negative errno.
 */
static int fsl_espi_setup(struct spi_device *spi)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	struct fsl_espi_reg *reg_base;
	int retval;
	u32 hw_mode;
	u32 loop_mode;
	struct spi_mpc8xxx_cs *cs = spi->controller_state;

	if (!spi->max_speed_hz)
		return -EINVAL;

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi->controller_state = cs;
	}

	mpc8xxx_spi = spi_master_get_devdata(spi->master);
	reg_base = mpc8xxx_spi->reg_base;

	hw_mode = cs->hw_mode; /* Save original settings */
	cs->hw_mode = mpc8xxx_spi_read_reg(
			&reg_base->csmode[spi->chip_select]);
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode);

	retval = fsl_espi_setup_transfer(spi, NULL);
	if (retval < 0) {
		cs->hw_mode = hw_mode; /* Restore settings */
		return retval;
	}
	return 0;
}
/*
 * fsl_espi_cpu_irq() - PIO interrupt service for one event word
 * @mspi:   driver state
 * @events: snapshot of the event register
 *
 * Drains one 32-bit word from the RX FIFO (byte-wise for a trailing
 * partial word), waits for TX to drain, acknowledges the events and
 * either queues the next TX word or completes the transfer.
 */
void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	struct fsl_espi_reg *reg_base = mspi->reg_base;

	/* We need handle RX first */
	if (events & SPIE_NE) {
		u32 rx_data, tmp;
		u8 rx_data_8;

		/* Spin until RX is done */
		while (SPIE_RXCNT(events) < min(4, mspi->len)) {
			cpu_relax();
			events = mpc8xxx_spi_read_reg(&reg_base->event);
		}

		if (mspi->len >= 4) {
			rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
		} else {
			/* trailing partial word: assemble byte by byte,
			 * then left-align it within the 32-bit word */
			tmp = mspi->len;
			rx_data = 0;
			while (tmp--) {
				rx_data_8 = in_8((u8 *)&reg_base->receive);
				rx_data |= (rx_data_8 << (tmp * 8));
			}

			rx_data <<= (4 - mspi->len) * 8;
		}

		/* may go negative on the final partial word; only the
		 * word counter (mspi->count) drives completion */
		mspi->len -= 4;

		if (mspi->rx)
			mspi->get_rx(rx_data, mspi);
	}

	if (!(events & SPIE_NF)) {
		int ret;

		/* spin until TX is done */
		ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg(
				&reg_base->event)) & SPIE_NF) == 0, 1000, 0);
		if (!ret) {
			dev_err(mspi->dev, "tired waiting for SPIE_NF\n");
			return;
		}
	}

	/* Clear the events */
	mpc8xxx_spi_write_reg(&reg_base->event, events);

	mspi->count -= 1;
	if (mspi->count) {
		u32 word = mspi->get_tx(mspi);

		mpc8xxx_spi_write_reg(&reg_base->transmit, word);
	} else {
		complete(&mspi->done);
	}
}
/*
 * Top-half interrupt handler: snapshot the event register and hand it
 * to fsl_espi_cpu_irq() for processing.  Reports IRQ_HANDLED whenever
 * any event bit was set.
 */
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct mpc8xxx_spi *mspi = context_data;
	struct fsl_espi_reg *reg_base = mspi->reg_base;
	u32 events;

	/* Get interrupt events(tx/rx) */
	events = mpc8xxx_spi_read_reg(&reg_base->event);

	dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(mspi, events);

	return events ? IRQ_HANDLED : IRQ_NONE;
}
/* Controller teardown hook: release the register mapping. */
static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
{
	iounmap(mspi->reg_base);
}
/*
 * fsl_espi_probe() - common (bus-independent) controller initialisation
 * @dev: parent device, with fsl_spi_platform_data attached
 * @mem: register window resource
 * @irq: interrupt number
 *
 * Allocates and registers the SPI master, maps the registers, installs
 * the interrupt handler and programs the controller's initial mode.
 * Returns the registered master or an ERR_PTR.
 *
 * Note: the error labels are named after the step that FAILED, not the
 * action they perform -- e.g. the "free_irq" label runs when
 * request_irq() failed and therefore only unmaps the registers, while
 * "unreg_master" frees the IRQ.
 */
static struct spi_master * fsl_espi_probe(struct device *dev,
		struct resource *mem, unsigned int irq)
{
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	struct spi_master *master;
	struct mpc8xxx_spi *mpc8xxx_spi;
	struct fsl_espi_reg *reg_base;
	u32 regval;
	int i, ret = 0;

	master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
	if (!master) {
		ret = -ENOMEM;
		goto err;
	}

	dev_set_drvdata(dev, master);

	ret = mpc8xxx_spi_probe(dev, mem, irq);
	if (ret)
		goto err_probe;

	master->setup = fsl_espi_setup;

	mpc8xxx_spi = spi_master_get_devdata(master);
	mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg;
	mpc8xxx_spi->spi_remove = fsl_espi_remove;

	mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
	if (!mpc8xxx_spi->reg_base) {
		ret = -ENOMEM;
		goto err_probe;
	}

	reg_base = mpc8xxx_spi->reg_base;

	/* Register for SPI Interrupt */
	ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq,
			  0, "fsl_espi", mpc8xxx_spi);
	if (ret)
		goto free_irq;

	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
		mpc8xxx_spi->rx_shift = 16;
		mpc8xxx_spi->tx_shift = 24;
	}

	/* SPI controller initializations */
	mpc8xxx_spi_write_reg(&reg_base->mode, 0);
	mpc8xxx_spi_write_reg(&reg_base->mask, 0);
	mpc8xxx_spi_write_reg(&reg_base->command, 0);
	mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);

	/* Init eSPI CS mode register */
	for (i = 0; i < pdata->max_chipselect; i++)
		mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);

	/* Enable SPI interface */
	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;

	mpc8xxx_spi_write_reg(&reg_base->mode, regval);

	ret = spi_register_master(master);
	if (ret < 0)
		goto unreg_master;

	dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);

	return master;

unreg_master:	/* spi_register_master failed: release the IRQ too */
	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
free_irq:	/* request_irq failed: only the mapping exists */
	iounmap(mpc8xxx_spi->reg_base);
err_probe:
	spi_master_put(master);
err:
	return ERR_PTR(ret);
}
/*
 * Read the "fsl,espi-num-chipselects" device-tree property into the
 * platform data.  Returns 0 on success, -EINVAL when the property is
 * absent or too short.
 */
static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	const u32 *prop;
	int len;

	prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
	if (!prop || len < sizeof(*prop)) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return -EINVAL;
	}

	pdata->max_chipselect = *prop;
	pdata->cs_control = NULL;	/* eSPI drives CS in hardware */

	return 0;
}
/*
 * of_fsl_espi_probe() - OF platform binding for the eSPI controller
 * @ofdev: platform device created from the device tree node
 *
 * Gathers the chip-select count, register window and IRQ from the
 * device tree, then hands off to fsl_espi_probe().
 *
 * Fix: dropped the dead "-ENOMEM" initializer of ret -- every path
 * assigns ret before it is read.
 *
 * Return: 0 on success or a negative errno.
 */
static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct spi_master *master;
	struct resource mem;
	struct resource irq;
	int ret;

	ret = of_mpc8xxx_spi_probe(ofdev);
	if (ret)
		return ret;

	ret = of_fsl_espi_get_chipselects(dev);
	if (ret)
		goto err;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		goto err;

	/* of_irq_to_resource() yields 0 when no interrupt is mapped */
	ret = of_irq_to_resource(np, 0, &irq);
	if (!ret) {
		ret = -EINVAL;
		goto err;
	}

	master = fsl_espi_probe(dev, &mem, irq.start);
	if (IS_ERR(master)) {
		ret = PTR_ERR(master);
		goto err;
	}

	return 0;

err:
	return ret;
}
/* Platform driver .remove: delegate to the shared mpc8xxx teardown. */
static int of_fsl_espi_remove(struct platform_device *dev)
{
	return mpc8xxx_spi_remove(&dev->dev);
}
/* Device-tree compatibles handled by this driver */
static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.owner = THIS_MODULE,
		.of_match_table = of_fsl_espi_match,
	},
	.probe		= of_fsl_espi_probe,
	.remove		= of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
hendersa/bbbandroid-kernel | arch/s390/kernel/vdso.c | 2205 | 8413 | /*
* vdso setup for s390
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
/* 32-bit vDSO image (bounds provided by the linker) and its page list */
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
/* 64-bit vDSO image (bounds provided by the linker) and its page list */
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
/*
 * Parse the "vdso=" kernel command line option.  Accepts "on"/"off" as
 * well as any numeric value (non-zero enables).  A malformed number
 * disables the vDSO and reports a parse failure.
 */
static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	/* strncmp with length covering the NUL: exact-string compare */
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = strict_strtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);
/*
 * The vdso data page: a single page shared read-only with user space,
 * overlaid with the vdso_data structure.
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page: record whether the ECTG instruction may be
 * used (requires facility bit 31 and a non-home-space user mode).
 */
static void vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available =
		s390_user_mode != HOME_SPACE_MODE && test_facility(31);
}
#ifdef CONFIG_64BIT
/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

/*
 * Build the per-cpu vDSO address space: a private segment table whose
 * single page table maps one read-only data page, reachable through an
 * ASTE referenced from the lowcore paste array.  Runs as a no-op
 * (returning 0) when the vDSO is disabled or in home-space mode.
 * Returns 0 on success or -ENOMEM.
 */
int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;

	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
		    256*sizeof(unsigned long));

	/* segment entry -> page table -> read-only data page */
	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_RO + page_frame;

	/* PSAL and ASTE live in the upper half of the page-table page */
	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x20000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;
	return 0;

out:
	/* freeing with address 0 is a no-op, so partial failure is fine */
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}
/*
 * Tear down the per-cpu vDSO tables by walking them back from the
 * lowcore paste entry (inverse of vdso_alloc_per_cpu).
 */
void vdso_free_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}
/*
 * Point control register 5 at the lowcore paste array so the vDSO can
 * locate its per-cpu data (no-op when the vDSO is disabled or running
 * in home-space mode).
 */
static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
		return;
	cr5 = offsetof(struct _lowcore, paste);
	__ctl_load(cr5, 5, 5);
}
#endif /* CONFIG_64BIT */
/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree.  Returns 0 on success
 * (including the do-nothing cases) or a negative errno.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	/* pick the image matching the task's personality */
#ifdef CONFIG_64BIT
	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
#endif
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW
	 * on those pages but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
return "[vdso]";
return NULL;
}
/*
 * Build the page lists for the compiled-in vDSO images.  Each list is
 * the image's code pages followed by the shared vdso_data page and a
 * NULL terminator; page counts include the data page.
 */
static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);	/* pin for the lifetime of the kernel */
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);	/* pin for the lifetime of the kernel */
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();
#endif /* CONFIG_64BIT */

	get_page(virt_to_page(vdso_data));

	smp_wmb();	/* publish the page lists before any user appears */

	return 0;
}
early_initcall(vdso_init);
/* s390 has no gate area: trivial implementations of the generic hooks. */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
| gpl-2.0 |
kerneldevs/RM-35-KERNEL-PECAN | drivers/isdn/hisax/ix1_micro.c | 5021 | 8337 | /* $Id: ix1_micro.c,v 2.12.2.4 2004/01/13 23:48:39 keil Exp $
*
* low level stuff for ITK ix1-micro Rev.2 isdn cards
* derived from the original file teles3.c from Karsten Keil
*
* Author Klaus-Peter Nischke
* Copyright by Klaus-Peter Nischke, ITK AG
* <klaus@nischke.do.eunet.de>
* by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Klaus-Peter Nischke
* Deusener Str. 287
* 44369 Dortmund
* Germany
*/
#include <linux/init.h>
#include <linux/isapnp.h>
#include "hisax.h"
#include "isac.h"
#include "hscx.h"
#include "isdnl1.h"
static const char *ix1_revision = "$Revision: 2.12.2.4 $";

#define byteout(addr,val) outb(val,addr)
#define bytein(addr) inb(addr)

/* port offsets relative to the card's base I/O address */
#define SPECIAL_PORT_OFFSET	3	/* reset/control port */
#define ISAC_COMMAND_OFFSET	2	/* ISAC address latch */
#define ISAC_DATA_OFFSET	0
#define HSCX_COMMAND_OFFSET	2	/* HSCX address latch */
#define HSCX_DATA_OFFSET	1
#define TIMEOUT	50
/*
 * Low-level indirect register access: the card exposes its chips via
 * an address-latch port (@ale) plus a data port (@adr).  Write the
 * register offset to the latch, then read/write the data port (the
 * fifo variants burst-transfer @size bytes at the latched offset).
 */
static inline u_char
readreg(unsigned int ale, unsigned int adr, u_char off)
{
	register u_char ret;

	byteout(ale, off);
	ret = bytein(adr);
	return (ret);
}

static inline void
readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	byteout(ale, off);
	insb(adr, data, size);
}

static inline void
writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
{
	byteout(ale, off);
	byteout(adr, data);
}

static inline void
writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	byteout(ale, off);
	outsb(adr, data, size);
}
/* Interface functions */

/* ISAC (D channel controller) register and FIFO accessors */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return (readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, offset));
}

static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, offset, value);
}

static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	readfifo(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, 0, data, size);
}

static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	writefifo(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, 0, data, size);
}

/* HSCX (B channel) accessors; channel 1 registers sit 0x40 above channel 0 */
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return (readreg(cs->hw.ix1.hscx_ale,
			cs->hw.ix1.hscx, offset + (hscx ? 0x40 : 0)));
}

static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writereg(cs->hw.ix1.hscx_ale,
		 cs->hw.ix1.hscx, offset + (hscx ? 0x40 : 0), value);
}
#define READHSCX(cs, nr, reg) readreg(cs->hw.ix1.hscx_ale, \
cs->hw.ix1.hscx, reg + (nr ? 0x40 : 0))
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.ix1.hscx_ale, \
cs->hw.ix1.hscx, reg + (nr ? 0x40 : 0), data)
#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ix1.hscx_ale, \
cs->hw.ix1.hscx, (nr ? 0x40 : 0), ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ix1.hscx_ale, \
cs->hw.ix1.hscx, (nr ? 0x40 : 0), ptr, cnt)
#include "hscx_irq.c"
/*
 * Card interrupt handler: alternately service the HSCX (B channels)
 * and the ISAC (D channel) until neither reports pending status, then
 * briefly mask and unmask both chips' interrupt masks to re-arm any
 * edge that may have been missed.  Always returns IRQ_HANDLED.
 */
static irqreturn_t
ix1micro_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char val;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	val = readreg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_ISTA + 0x40);
      Start_HSCX:
	if (val)
		hscx_int_main(cs, val);
	val = readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_ISTA);
      Start_ISAC:
	if (val)
		isac_interrupt(cs, val);
	/* re-check both chips: servicing one may raise the other */
	val = readreg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_ISTA + 0x40);
	if (val) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX IntStat after IntRoutine");
		goto Start_HSCX;
	}
	val = readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_ISTA);
	if (val) {
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ISAC IntStat after IntRoutine");
		goto Start_ISAC;
	}
	/* mask then unmask all interrupt sources to re-arm the chips */
	writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK, 0xFF);
	writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK + 0x40, 0xFF);
	writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_MASK, 0xFF);
	writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_MASK, 0);
	writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK, 0);
	writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK + 0x40, 0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/* Release the 4-port I/O region claimed in setup_ix1micro(), if any. */
static void
release_io_ix1micro(struct IsdnCardState *cs)
{
	if (cs->hw.ix1.cfg_reg)
		release_region(cs->hw.ix1.cfg_reg, 4);
}
/*
 * Pulse the card's reset line: hold the reset bit asserted for at
 * least 300 ms (3 * HZ/10 + 1 iterations of >= 10 ms each), then
 * release it.
 */
static void
ix1_reset(struct IsdnCardState *cs)
{
	int cnt;

	/* reset isac */
	cnt = 3 * (HZ / 10) + 1;
	while (cnt--) {
		byteout(cs->hw.ix1.cfg_reg + SPECIAL_PORT_OFFSET, 1);
		HZDELAY(1);	/* wait >=10 ms */
	}
	byteout(cs->hw.ix1.cfg_reg + SPECIAL_PORT_OFFSET, 0);
}
/*
 * Card control callback: handle RESET / RELEASE / INIT / TEST messages
 * from the HiSax core.  Always returns 0.
 */
static int
ix1_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
		case CARD_RESET:
			spin_lock_irqsave(&cs->lock, flags);
			ix1_reset(cs);
			spin_unlock_irqrestore(&cs->lock, flags);
			return(0);
		case CARD_RELEASE:
			release_io_ix1micro(cs);
			return(0);
		case CARD_INIT:
			spin_lock_irqsave(&cs->lock, flags);
			ix1_reset(cs);
			inithscxisac(cs, 3);	/* bring up both chips */
			spin_unlock_irqrestore(&cs->lock, flags);
			return(0);
		case CARD_TEST:
			return(0);
	}
	return(0);
}
#ifdef __ISAPNP__
/* ISAPnP IDs of the supported ITK cards; ipid and pnp_c keep the
 * search position across setup_ix1micro() invocations. */
static struct isapnp_device_id itk_ids[] __devinitdata = {
	{ ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
	  ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
	  (unsigned long) "ITK micro 2" },
	{ ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x29),
	  ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x29),
	  (unsigned long) "ITK micro 2." },
	{ 0, }
};

static struct isapnp_device_id *ipid __devinitdata = &itk_ids[0];
static struct pnp_card *pnp_c __devinitdata = NULL;
#endif
/*
 * setup_ix1micro() - detect and initialise an ITK ix1-micro Rev.2 card
 * @card: HiSax card descriptor; para[0] = IRQ, para[1] = base I/O port
 *        (filled in by ISAPnP detection when not given)
 *
 * Fix: the busy-region warning printed "cfg_reg + 4" as the end of the
 * claimed range, one past the last port of the 4-port region; it now
 * prints the inclusive end (cfg_reg + 3).
 *
 * Return: 1 on success, 0 on failure (wrong card type, PnP failure,
 * busy I/O region or HSCX version mismatch).
 */
int __devinit
setup_ix1micro(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

	strcpy(tmp, ix1_revision);
	printk(KERN_INFO "HiSax: ITK IX1 driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_IX1MICROR2)
		return (0);

#ifdef __ISAPNP__
	/* no base port supplied: try to locate the card via ISAPnP */
	if (!card->para[1] && isapnp_present()) {
		struct pnp_dev *pnp_d;
		while(ipid->card_vendor) {
			if ((pnp_c = pnp_find_card(ipid->card_vendor,
				ipid->card_device, pnp_c))) {
				pnp_d = NULL;
				if ((pnp_d = pnp_find_dev(pnp_c,
					ipid->vendor, ipid->function, pnp_d))) {
					int err;

					printk(KERN_INFO "HiSax: %s detected\n",
						(char *)ipid->driver_data);
					pnp_disable_dev(pnp_d);
					err = pnp_activate_dev(pnp_d);
					if (err<0) {
						printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
							__func__, err);
						return(0);
					}
					card->para[1] = pnp_port_start(pnp_d, 0);
					card->para[0] = pnp_irq(pnp_d, 0);
					if (!card->para[0] || !card->para[1]) {
						printk(KERN_ERR "ITK PnP:some resources are missing %ld/%lx\n",
							card->para[0], card->para[1]);
						pnp_disable_dev(pnp_d);
						return(0);
					}
					break;
				} else {
					printk(KERN_ERR "ITK PnP: PnP error card found, no device\n");
				}
			}
			ipid++;
			pnp_c = NULL;
		}
		if (!ipid->card_vendor) {
			printk(KERN_INFO "ITK PnP: no ISAPnP card found\n");
			return(0);
		}
	}
#endif
	/* IO-Ports */
	cs->hw.ix1.isac_ale = card->para[1] + ISAC_COMMAND_OFFSET;
	cs->hw.ix1.hscx_ale = card->para[1] + HSCX_COMMAND_OFFSET;
	cs->hw.ix1.isac = card->para[1] + ISAC_DATA_OFFSET;
	cs->hw.ix1.hscx = card->para[1] + HSCX_DATA_OFFSET;
	cs->hw.ix1.cfg_reg = card->para[1];
	cs->irq = card->para[0];
	if (cs->hw.ix1.cfg_reg) {
		if (!request_region(cs->hw.ix1.cfg_reg, 4, "ix1micro cfg")) {
			/* inclusive end of the 4-port region is +3 */
			printk(KERN_WARNING
			       "HiSax: ITK ix1-micro Rev.2 config port "
			       "%x-%x already in use\n",
			       cs->hw.ix1.cfg_reg,
			       cs->hw.ix1.cfg_reg + 3);
			return (0);
		}
	}
	printk(KERN_INFO "HiSax: ITK ix1-micro Rev.2 config irq:%d io:0x%X\n",
	       cs->irq, cs->hw.ix1.cfg_reg);
	setup_isac(cs);
	/* wire up the chip-access callbacks used by the HiSax core */
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &ix1_card_msg;
	cs->irq_func = &ix1micro_interrupt;
	ISACVersion(cs, "ix1-Micro:");
	if (HscxVersion(cs, "ix1-Micro:")) {
		printk(KERN_WARNING
		       "ix1-Micro: wrong HSCX versions check IO address\n");
		release_io_ix1micro(cs);
		return (0);
	}
	return (1);
}
| gpl-2.0 |
TheBr0ken/vigor_aosp_kernel | drivers/isdn/hisax/ix1_micro.c | 5021 | 8337 | /* $Id: ix1_micro.c,v 2.12.2.4 2004/01/13 23:48:39 keil Exp $
*
* low level stuff for ITK ix1-micro Rev.2 isdn cards
* derived from the original file teles3.c from Karsten Keil
*
* Author Klaus-Peter Nischke
* Copyright by Klaus-Peter Nischke, ITK AG
* <klaus@nischke.do.eunet.de>
* by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* Klaus-Peter Nischke
* Deusener Str. 287
* 44369 Dortmund
* Germany
*/
#include <linux/init.h>
#include <linux/isapnp.h>
#include "hisax.h"
#include "isac.h"
#include "hscx.h"
#include "isdnl1.h"
static const char *ix1_revision = "$Revision: 2.12.2.4 $";
#define byteout(addr,val) outb(val,addr)
#define bytein(addr) inb(addr)
#define SPECIAL_PORT_OFFSET 3
#define ISAC_COMMAND_OFFSET 2
#define ISAC_DATA_OFFSET 0
#define HSCX_COMMAND_OFFSET 2
#define HSCX_DATA_OFFSET 1
#define TIMEOUT 50
/*
 * Low-level indirect register access (address latch @ale + data port
 * @adr): latch the register offset, then read/write the data port; the
 * fifo variants burst-transfer @size bytes at the latched offset.
 */
static inline u_char
readreg(unsigned int ale, unsigned int adr, u_char off)
{
	register u_char ret;

	byteout(ale, off);
	ret = bytein(adr);
	return (ret);
}

static inline void
readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	byteout(ale, off);
	insb(adr, data, size);
}

static inline void
writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
{
	byteout(ale, off);
	byteout(adr, data);
}

static inline void
writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	byteout(ale, off);
	outsb(adr, data, size);
}
/* Interface functions */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
return (readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, offset));
}
/* HiSax callback: write an ISAC (D-channel) register. */
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, offset, value);
}
/* HiSax callback: read 'size' bytes from the ISAC FIFO (register 0). */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	readfifo(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, 0, data, size);
}
/* HiSax callback: write 'size' bytes into the ISAC FIFO (register 0). */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	writefifo(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, 0, data, size);
}
/* HiSax callback: read an HSCX (B-channel) register.  Channel B's
 * register bank lives at offset +0x40 from channel A's. */
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return (readreg(cs->hw.ix1.hscx_ale,
			cs->hw.ix1.hscx, offset + (hscx ? 0x40 : 0)));
}
/* HiSax callback: write an HSCX (B-channel) register; channel B's
 * register bank is at offset +0x40. */
static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writereg(cs->hw.ix1.hscx_ale,
		 cs->hw.ix1.hscx, offset + (hscx ? 0x40 : 0), value);
}
#define READHSCX(cs, nr, reg) readreg(cs->hw.ix1.hscx_ale, \
cs->hw.ix1.hscx, reg + (nr ? 0x40 : 0))
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.ix1.hscx_ale, \
cs->hw.ix1.hscx, reg + (nr ? 0x40 : 0), data)
#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ix1.hscx_ale, \
cs->hw.ix1.hscx, (nr ? 0x40 : 0), ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ix1.hscx_ale, \
cs->hw.ix1.hscx, (nr ? 0x40 : 0), ptr, cnt)
#include "hscx_irq.c"
/*
 * Card interrupt handler.  Services HSCX (B-channel) and ISAC
 * (D-channel) interrupt status alternately until neither chip reports
 * pending status, then pulses the mask registers (all-ones, then zero)
 * to acknowledge and re-arm the interrupt lines.  The goto labels
 * re-enter the service loop when fresh status appears while the
 * handler is still running.
 */
static irqreturn_t
ix1micro_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char val;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	/* ISTA at +0x40 (channel B) reports status for both HSCX channels */
	val = readreg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_ISTA + 0x40);
      Start_HSCX:
	if (val)
		hscx_int_main(cs, val);
	val = readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_ISTA);
      Start_ISAC:
	if (val)
		isac_interrupt(cs, val);
	/* re-check both chips: loop until fully quiescent */
	val = readreg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_ISTA + 0x40);
	if (val) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX IntStat after IntRoutine");
		goto Start_HSCX;
	}
	val = readreg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_ISTA);
	if (val) {
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ISAC IntStat after IntRoutine");
		goto Start_ISAC;
	}
	/* mask everything, then unmask: acknowledges pending edges */
	writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK, 0xFF);
	writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK + 0x40, 0xFF);
	writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_MASK, 0xFF);
	writereg(cs->hw.ix1.isac_ale, cs->hw.ix1.isac, ISAC_MASK, 0);
	writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK, 0);
	writereg(cs->hw.ix1.hscx_ale, cs->hw.ix1.hscx, HSCX_MASK + 0x40, 0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/* Release the 4-byte I/O region reserved in setup_ix1micro(); a zero
 * cfg_reg means no region was ever requested. */
static void
release_io_ix1micro(struct IsdnCardState *cs)
{
	if (cs->hw.ix1.cfg_reg)
		release_region(cs->hw.ix1.cfg_reg, 4);
}
/* Hardware reset: hold the reset bit in the special port high for a
 * series of >=10 ms pulses (~300 ms total), then release it. */
static void
ix1_reset(struct IsdnCardState *cs)
{
	int cnt;

	/* reset isac */
	cnt = 3 * (HZ / 10) + 1;
	while (cnt--) {
		byteout(cs->hw.ix1.cfg_reg + SPECIAL_PORT_OFFSET, 1);
		HZDELAY(1);	/* wait >=10 ms */
	}
	byteout(cs->hw.ix1.cfg_reg + SPECIAL_PORT_OFFSET, 0);
}
/*
 * Card-control hook invoked by the HiSax core.  Handles reset,
 * release, and init requests; CARD_TEST and any unknown message are
 * accepted as no-ops.  Always reports success (returns 0).
 */
static int
ix1_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	if (mt == CARD_RESET) {
		spin_lock_irqsave(&cs->lock, flags);
		ix1_reset(cs);
		spin_unlock_irqrestore(&cs->lock, flags);
	} else if (mt == CARD_RELEASE) {
		release_io_ix1micro(cs);
	} else if (mt == CARD_INIT) {
		spin_lock_irqsave(&cs->lock, flags);
		ix1_reset(cs);
		inithscxisac(cs, 3);
		spin_unlock_irqrestore(&cs->lock, flags);
	}
	return 0;
}
#ifdef __ISAPNP__
static struct isapnp_device_id itk_ids[] __devinitdata = {
{ ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x25),
(unsigned long) "ITK micro 2" },
{ ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x29),
ISAPNP_VENDOR('I', 'T', 'K'), ISAPNP_FUNCTION(0x29),
(unsigned long) "ITK micro 2." },
{ 0, }
};
static struct isapnp_device_id *ipid __devinitdata = &itk_ids[0];
static struct pnp_card *pnp_c __devinitdata = NULL;
#endif
/*
 * Probe/attach entry point for the ITK ix1-micro Rev.2 card.
 *
 * card->para[0] carries the IRQ and card->para[1] the I/O base; when
 * para[1] is zero the resources are discovered via ISAPnP.  Returns 1
 * when the card was found and initialised, 0 on any failure (wrong
 * card type, PnP failure, busy I/O region, or chip probe failure).
 */
int __devinit
setup_ix1micro(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

	strcpy(tmp, ix1_revision);
	printk(KERN_INFO "HiSax: ITK IX1 driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_IX1MICROR2)
		return (0);

#ifdef __ISAPNP__
	/* No I/O base configured: walk the known ISAPnP IDs and activate
	 * the first matching card/device pair to obtain port and IRQ. */
	if (!card->para[1] && isapnp_present()) {
		struct pnp_dev *pnp_d;
		while(ipid->card_vendor) {
			if ((pnp_c = pnp_find_card(ipid->card_vendor,
				ipid->card_device, pnp_c))) {
				pnp_d = NULL;
				if ((pnp_d = pnp_find_dev(pnp_c,
					ipid->vendor, ipid->function, pnp_d))) {
					int err;

					printk(KERN_INFO "HiSax: %s detected\n",
						(char *)ipid->driver_data);
					/* disable first so activation starts clean */
					pnp_disable_dev(pnp_d);
					err = pnp_activate_dev(pnp_d);
					if (err<0) {
						printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
							__func__, err);
						return(0);
					}
					card->para[1] = pnp_port_start(pnp_d, 0);
					card->para[0] = pnp_irq(pnp_d, 0);
					if (!card->para[0] || !card->para[1]) {
						printk(KERN_ERR "ITK PnP:some resources are missing %ld/%lx\n",
							card->para[0], card->para[1]);
						pnp_disable_dev(pnp_d);
						return(0);
					}
					break;
				} else {
					printk(KERN_ERR "ITK PnP: PnP error card found, no device\n");
				}
			}
			ipid++;
			pnp_c = NULL;
		}
		if (!ipid->card_vendor) {
			printk(KERN_INFO "ITK PnP: no ISAPnP card found\n");
			return(0);
		}
	}
#endif
	/* IO-Ports: ALE (command latch) and data-port addresses for the
	 * ISAC and HSCX chips, all offsets from the configuration base. */
	cs->hw.ix1.isac_ale = card->para[1] + ISAC_COMMAND_OFFSET;
	cs->hw.ix1.hscx_ale = card->para[1] + HSCX_COMMAND_OFFSET;
	cs->hw.ix1.isac = card->para[1] + ISAC_DATA_OFFSET;
	cs->hw.ix1.hscx = card->para[1] + HSCX_DATA_OFFSET;
	cs->hw.ix1.cfg_reg = card->para[1];
	cs->irq = card->para[0];
	if (cs->hw.ix1.cfg_reg) {
		if (!request_region(cs->hw.ix1.cfg_reg, 4, "ix1micro cfg")) {
			/* NOTE(review): message reports cfg_reg..cfg_reg+4,
			 * but the 4-byte region ends at cfg_reg+3. */
			printk(KERN_WARNING
			       "HiSax: ITK ix1-micro Rev.2 config port "
			       "%x-%x already in use\n",
			       cs->hw.ix1.cfg_reg,
			       cs->hw.ix1.cfg_reg + 4);
			return (0);
		}
	}
	printk(KERN_INFO "HiSax: ITK ix1-micro Rev.2 config irq:%d io:0x%X\n",
	       cs->irq, cs->hw.ix1.cfg_reg);
	setup_isac(cs);
	/* hook the chip access callbacks into the generic card state */
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &ix1_card_msg;
	cs->irq_func = &ix1micro_interrupt;
	ISACVersion(cs, "ix1-Micro:");
	/* an HSCX version probe failure usually means a wrong I/O base */
	if (HscxVersion(cs, "ix1-Micro:")) {
		printk(KERN_WARNING
		       "ix1-Micro: wrong HSCX versions check IO address\n");
		release_io_ix1micro(cs);
		return (0);
	}
	return (1);
}
| gpl-2.0 |
Soorma07/linux-davinci | net/l2tp/l2tp_netlink.c | 7837 | 21384 | /*
* L2TP netlink layer, for management
*
* Copyright (c) 2008,2009,2010 Katalix Systems Ltd
*
* Partly based on the IrDA nelink implementation
* (see net/irda/irnetlink.c) which is:
* Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
* which is in turn partly based on the wireless netlink code:
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <net/sock.h>
#include <net/genetlink.h>
#include <net/udp.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <linux/l2tp.h>
#include "l2tp_core.h"
static struct genl_family l2tp_nl_family = {
.id = GENL_ID_GENERATE,
.name = L2TP_GENL_NAME,
.version = L2TP_GENL_VERSION,
.hdrsize = 0,
.maxattr = L2TP_ATTR_MAX,
};
/* Accessed under genl lock */
static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
/* Resolve the target session of a genl request: by interface name when
 * L2TP_ATTR_IFNAME is present, otherwise by the (tunnel id, session id)
 * attribute pair.  Returns NULL when no matching session exists.
 */
static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
{
	u32 tunnel_id;
	u32 session_id;
	char *ifname;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session = NULL;
	struct net *net = genl_info_net(info);

	if (info->attrs[L2TP_ATTR_IFNAME]) {
		ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
		session = l2tp_session_find_by_ifname(net, ifname);
	} else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
		   (info->attrs[L2TP_ATTR_CONN_ID])) {
		tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
		session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
		tunnel = l2tp_tunnel_find(net, tunnel_id);
		if (tunnel)
			session = l2tp_session_find(net, tunnel, session_id);
	}

	return session;
}
static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
void *hdr;
int ret = -ENOBUFS;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
}
hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
&l2tp_nl_family, 0, L2TP_CMD_NOOP);
if (IS_ERR(hdr)) {
ret = PTR_ERR(hdr);
goto err_out;
}
genlmsg_end(msg, hdr);
return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
err_out:
nlmsg_free(msg);
out:
return ret;
}
/* L2TP_CMD_TUNNEL_CREATE: create a new tunnel.
 * Mandatory attributes: CONN_ID, PEER_CONN_ID, PROTO_VERSION and
 * ENCAP_TYPE.  A tunnel socket may be passed in via L2TP_ATTR_FD;
 * otherwise an unmanaged socket is described by the address/port
 * attributes.  Returns 0 on success or a negative errno.
 */
static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
{
	u32 tunnel_id;
	u32 peer_tunnel_id;
	int proto_version;
	int fd;
	int ret = 0;
	struct l2tp_tunnel_cfg cfg = { 0, };
	struct l2tp_tunnel *tunnel;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);

	if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
		ret = -EINVAL;
		goto out;
	}
	proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);

	if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
		ret = -EINVAL;
		goto out;
	}
	cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);

	/* fd == -1 requests an unmanaged (kernel-created) tunnel socket,
	 * configured from the optional address/port attributes below. */
	fd = -1;
	if (info->attrs[L2TP_ATTR_FD]) {
		fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
	} else {
		if (info->attrs[L2TP_ATTR_IP_SADDR])
			cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]);
		if (info->attrs[L2TP_ATTR_IP_DADDR])
			cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]);
		if (info->attrs[L2TP_ATTR_UDP_SPORT])
			cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
		if (info->attrs[L2TP_ATTR_UDP_DPORT])
			cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
		if (info->attrs[L2TP_ATTR_UDP_CSUM])
			cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	/* reject duplicate tunnel ids */
	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel != NULL) {
		ret = -EEXIST;
		goto out;
	}

	ret = -EINVAL;
	switch (cfg.encap) {
	case L2TP_ENCAPTYPE_UDP:
	case L2TP_ENCAPTYPE_IP:
		ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
					 peer_tunnel_id, &cfg, &tunnel);
		break;
	}

out:
	return ret;
}
/* L2TP_CMD_TUNNEL_DELETE: tear down the tunnel named by CONN_ID.
 * Returns 0 on success, -EINVAL when the id attribute is missing,
 * -ENODEV when no such tunnel exists.
 */
static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct l2tp_tunnel *tunnel;
	u32 tunnel_id;

	if (!info->attrs[L2TP_ATTR_CONN_ID])
		return -EINVAL;

	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel)
		return -ENODEV;

	(void) l2tp_tunnel_delete(tunnel);

	return 0;
}
/* L2TP_CMD_TUNNEL_MODIFY: update mutable tunnel parameters (currently
 * only the debug mask).  Returns 0 on success, -EINVAL when CONN_ID
 * is missing, -ENODEV when the tunnel does not exist.
 */
static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct l2tp_tunnel *tunnel;
	u32 tunnel_id;

	if (!info->attrs[L2TP_ATTR_CONN_ID])
		return -EINVAL;

	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel)
		return -ENODEV;

	if (info->attrs[L2TP_ATTR_DEBUG])
		tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	return 0;
}
/* Build one L2TP_CMD_TUNNEL_GET message describing @tunnel into @skb.
 * Returns the genlmsg_end() result on success or a negative value when
 * the header or an attribute did not fit.
 */
static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
			       struct l2tp_tunnel *tunnel)
{
	void *hdr;
	struct nlattr *nest;
	struct sock *sk = NULL;
	struct inet_sock *inet;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
			  L2TP_CMD_TUNNEL_GET);
	if (!hdr)
		/* genlmsg_put() returns NULL on failure, not an ERR_PTR,
		 * so the old IS_ERR(hdr)/PTR_ERR(hdr) check was dead code. */
		return -EMSGSIZE;

	NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
	NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
	NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
	NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
	NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);

	/* nested tunnel statistics */
	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
	NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
	nla_nest_end(skb, nest);

	/* address/port attributes depend on the encapsulation socket */
	sk = tunnel->sock;
	if (!sk)
		goto out;

	inet = inet_sk(sk);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
		NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
		NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
		/* NOBREAK */
	case L2TP_ENCAPTYPE_IP:
		NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
		NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
		break;
	}

out:
	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
/* L2TP_CMD_TUNNEL_GET (doit): look up the tunnel named by CONN_ID,
 * serialise it via l2tp_nl_tunnel_send() and unicast the reply to the
 * requester.  Returns 0 on success or a negative errno.
 */
static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
{
	struct l2tp_tunnel *tunnel;
	struct sk_buff *msg;
	u32 tunnel_id;
	int ret = -ENOBUFS;
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}

	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel == NULL) {
		ret = -ENODEV;
		goto out;
	}

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq,
				  NLM_F_ACK, tunnel);
	if (ret < 0)
		goto err_out;

	return genlmsg_unicast(net, msg, info->snd_pid);

err_out:
	nlmsg_free(msg);

out:
	return ret;
}
/* L2TP_CMD_TUNNEL_GET (dumpit): emit one message per tunnel, resuming
 * from the tunnel index saved in cb->args[0] on the previous pass.
 * Stops when the skb is full or no more tunnels exist.
 */
static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int ti = cb->args[0];
	struct l2tp_tunnel *tunnel;
	struct net *net = sock_net(skb->sk);

	for (;;) {
		tunnel = l2tp_tunnel_find_nth(net, ti);
		if (tunnel == NULL)
			goto out;

		if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq, NLM_F_MULTI,
					tunnel) <= 0)
			goto out;

		ti++;
	}

out:
	/* remember where to resume on the next dump invocation */
	cb->args[0] = ti;

	return skb->len;
}
/* L2TP_CMD_SESSION_CREATE: create a session inside an existing tunnel.
 * Mandatory attributes: CONN_ID (tunnel), SESSION_ID, PEER_SESSION_ID
 * and PW_TYPE.  L2TPv3-only parameters (cookies, L2-specific sublayer,
 * ifname, vlan id) are honoured only when the tunnel version is > 2.
 * Dispatches to the registered pseudowire ops' session_create handler.
 * Returns 0 on success or a negative errno.
 */
static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
{
	u32 tunnel_id = 0;
	u32 session_id;
	u32 peer_session_id;
	int ret = 0;
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct l2tp_session_cfg cfg = { 0, };
	struct net *net = genl_info_net(info);

	if (!info->attrs[L2TP_ATTR_CONN_ID]) {
		ret = -EINVAL;
		goto out;
	}
	tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel) {
		ret = -ENODEV;
		goto out;
	}

	if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
		ret = -EINVAL;
		goto out;
	}
	session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
	/* reject duplicate session ids within the tunnel */
	session = l2tp_session_find(net, tunnel, session_id);
	if (session) {
		ret = -EEXIST;
		goto out;
	}

	if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
		ret = -EINVAL;
		goto out;
	}
	peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);

	if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
		ret = -EINVAL;
		goto out;
	}
	cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
	if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
		ret = -EINVAL;
		goto out;
	}

	/* L2TPv3-only session parameters */
	if (tunnel->version > 2) {
		if (info->attrs[L2TP_ATTR_OFFSET])
			cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);

		if (info->attrs[L2TP_ATTR_DATA_SEQ])
			cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);

		cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
		if (info->attrs[L2TP_ATTR_L2SPEC_TYPE])
			cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);

		cfg.l2specific_len = 4;
		if (info->attrs[L2TP_ATTR_L2SPEC_LEN])
			cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]);

		/* cookies are capped at 8 bytes by the protocol */
		if (info->attrs[L2TP_ATTR_COOKIE]) {
			u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
			if (len > 8) {
				ret = -EINVAL;
				goto out;
			}
			cfg.cookie_len = len;
			memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
		}
		if (info->attrs[L2TP_ATTR_PEER_COOKIE]) {
			u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
			if (len > 8) {
				ret = -EINVAL;
				goto out;
			}
			cfg.peer_cookie_len = len;
			memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
		}
		if (info->attrs[L2TP_ATTR_IFNAME])
			cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);

		if (info->attrs[L2TP_ATTR_VLAN_ID])
			cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]);
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	if (info->attrs[L2TP_ATTR_RECV_SEQ])
		cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);

	if (info->attrs[L2TP_ATTR_SEND_SEQ])
		cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);

	if (info->attrs[L2TP_ATTR_LNS_MODE])
		cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);

	if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
		cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);

	if (info->attrs[L2TP_ATTR_MTU])
		cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);

	if (info->attrs[L2TP_ATTR_MRU])
		cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);

	/* no ops registered for this pseudowire type */
	if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
	    (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
		ret = -EPROTONOSUPPORT;
		goto out;
	}

	/* Check that pseudowire-specific params are present */
	switch (cfg.pw_type) {
	case L2TP_PWTYPE_NONE:
		break;
	case L2TP_PWTYPE_ETH_VLAN:
		if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
			ret = -EINVAL;
			goto out;
		}
		break;
	case L2TP_PWTYPE_ETH:
		break;
	case L2TP_PWTYPE_PPP:
	case L2TP_PWTYPE_PPP_AC:
		break;
	case L2TP_PWTYPE_IP:
	default:
		/* NOTE(review): this path breaks out of the switch rather
		 * than jumping to out, so ret is overwritten below and
		 * session_create is still attempted; in practice the ops
		 * NULL-check above already rejects unregistered types. */
		ret = -EPROTONOSUPPORT;
		break;
	}

	ret = -EPROTONOSUPPORT;
	if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
		ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
			session_id, peer_session_id, &cfg);

out:
	return ret;
}
/* L2TP_CMD_SESSION_DELETE: delete the session selected by ifname or
 * (tunnel id, session id), delegating to the pseudowire-specific
 * session_delete handler when one is registered.
 */
static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info)
{
	int ret = 0;
	struct l2tp_session *session;
	u16 pw_type;

	session = l2tp_nl_session_find(info);
	if (session == NULL) {
		ret = -ENODEV;
		goto out;
	}

	pw_type = session->pwtype;
	if (pw_type < __L2TP_PWTYPE_MAX)
		if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
			ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);

out:
	return ret;
}
/* L2TP_CMD_SESSION_MODIFY: update mutable per-session parameters
 * (debug mask, sequencing flags, LNS mode, reorder timeout, MTU/MRU)
 * from whichever optional attributes are present.
 */
static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info)
{
	int ret = 0;
	struct l2tp_session *session;

	session = l2tp_nl_session_find(info);
	if (session == NULL) {
		ret = -ENODEV;
		goto out;
	}

	if (info->attrs[L2TP_ATTR_DEBUG])
		session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);

	if (info->attrs[L2TP_ATTR_DATA_SEQ])
		session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);

	if (info->attrs[L2TP_ATTR_RECV_SEQ])
		session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);

	if (info->attrs[L2TP_ATTR_SEND_SEQ])
		session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);

	if (info->attrs[L2TP_ATTR_LNS_MODE])
		session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);

	if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
		session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);

	if (info->attrs[L2TP_ATTR_MTU])
		session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);

	if (info->attrs[L2TP_ATTR_MRU])
		session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);

out:
	return ret;
}
/* Build one L2TP_CMD_SESSION_GET message describing @session into @skb.
 * Returns the genlmsg_end() result on success or a negative value when
 * the header or an attribute did not fit.
 */
static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
				struct l2tp_session *session)
{
	void *hdr;
	struct nlattr *nest;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = NULL;

	sk = tunnel->sock;

	hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
	if (!hdr)
		/* genlmsg_put() returns NULL on failure, not an ERR_PTR,
		 * so the old IS_ERR(hdr)/PTR_ERR(hdr) check was dead code. */
		return -EMSGSIZE;

	NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
	NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
	NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
	NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
	NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
	NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
	NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
	if (session->mru)
		NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);

	if (session->ifname && session->ifname[0])
		NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
	if (session->cookie_len)
		NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
	if (session->peer_cookie_len)
		NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
	NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
	NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
	NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
#ifdef CONFIG_XFRM
	if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
		NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
#endif
	if (session->reorder_timeout)
		NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);

	/* nested session statistics */
	nest = nla_nest_start(skb, L2TP_ATTR_STATS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
	NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
	NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
	nla_nest_end(skb, nest);

	return genlmsg_end(skb, hdr);

 nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -1;
}
/* L2TP_CMD_SESSION_GET (doit): look up the session selected by ifname
 * or (tunnel id, session id), serialise it and unicast the reply to
 * the requester.  Returns 0 on success or a negative errno.
 */
static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
{
	struct l2tp_session *session;
	struct sk_buff *msg;
	int ret;

	session = l2tp_nl_session_find(info);
	if (session == NULL) {
		ret = -ENODEV;
		goto out;
	}

	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
				   0, session);
	if (ret < 0)
		goto err_out;

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);

err_out:
	nlmsg_free(msg);

out:
	return ret;
}
/* L2TP_CMD_SESSION_GET (dumpit): walk every session of every tunnel,
 * resuming from the (tunnel index, session index) pair saved in
 * cb->args on the previous pass.  Stops when the skb is full.
 */
static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel = NULL;
	int ti = cb->args[0];
	int si = cb->args[1];

	for (;;) {
		if (tunnel == NULL) {
			tunnel = l2tp_tunnel_find_nth(net, ti);
			if (tunnel == NULL)
				goto out;
		}

		session = l2tp_session_find_nth(tunnel, si);
		if (session == NULL) {
			/* this tunnel is exhausted; advance to the next one */
			ti++;
			tunnel = NULL;
			si = 0;
			continue;
		}

		if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 session) <= 0)
			break;

		si++;
	}

out:
	/* remember where to resume on the next dump invocation */
	cb->args[0] = ti;
	cb->args[1] = si;

	return skb->len;
}
static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
[L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, },
[L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, },
[L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, },
[L2TP_ATTR_OFFSET] = { .type = NLA_U16, },
[L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, },
[L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, },
[L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, },
[L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, },
[L2TP_ATTR_CONN_ID] = { .type = NLA_U32, },
[L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, },
[L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, },
[L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, },
[L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, },
[L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, },
[L2TP_ATTR_DEBUG] = { .type = NLA_U32, },
[L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, },
[L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, },
[L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, },
[L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, },
[L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, },
[L2TP_ATTR_FD] = { .type = NLA_U32, },
[L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, },
[L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, },
[L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, },
[L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, },
[L2TP_ATTR_MTU] = { .type = NLA_U16, },
[L2TP_ATTR_MRU] = { .type = NLA_U16, },
[L2TP_ATTR_STATS] = { .type = NLA_NESTED, },
[L2TP_ATTR_IFNAME] = {
.type = NLA_NUL_STRING,
.len = IFNAMSIZ - 1,
},
[L2TP_ATTR_COOKIE] = {
.type = NLA_BINARY,
.len = 8,
},
[L2TP_ATTR_PEER_COOKIE] = {
.type = NLA_BINARY,
.len = 8,
},
};
static struct genl_ops l2tp_nl_ops[] = {
{
.cmd = L2TP_CMD_NOOP,
.doit = l2tp_nl_cmd_noop,
.policy = l2tp_nl_policy,
/* can be retrieved by unprivileged users */
},
{
.cmd = L2TP_CMD_TUNNEL_CREATE,
.doit = l2tp_nl_cmd_tunnel_create,
.policy = l2tp_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_TUNNEL_DELETE,
.doit = l2tp_nl_cmd_tunnel_delete,
.policy = l2tp_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_TUNNEL_MODIFY,
.doit = l2tp_nl_cmd_tunnel_modify,
.policy = l2tp_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_TUNNEL_GET,
.doit = l2tp_nl_cmd_tunnel_get,
.dumpit = l2tp_nl_cmd_tunnel_dump,
.policy = l2tp_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_SESSION_CREATE,
.doit = l2tp_nl_cmd_session_create,
.policy = l2tp_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_SESSION_DELETE,
.doit = l2tp_nl_cmd_session_delete,
.policy = l2tp_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_SESSION_MODIFY,
.doit = l2tp_nl_cmd_session_modify,
.policy = l2tp_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = L2TP_CMD_SESSION_GET,
.doit = l2tp_nl_cmd_session_get,
.dumpit = l2tp_nl_cmd_session_dump,
.policy = l2tp_nl_policy,
.flags = GENL_ADMIN_PERM,
},
};
/* Register pseudowire-specific netlink handlers for @pw_type.
 * Returns 0 on success, -EINVAL for an out-of-range type, -EBUSY when
 * handlers are already registered for that type.  The ops table is
 * protected by the genl mutex.
 */
int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops)
{
	int ret = -EINVAL;

	if (pw_type >= __L2TP_PWTYPE_MAX)
		return ret;

	genl_lock();
	if (l2tp_nl_cmd_ops[pw_type]) {
		ret = -EBUSY;
	} else {
		l2tp_nl_cmd_ops[pw_type] = ops;
		ret = 0;
	}
	genl_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
/* Remove the pseudowire-specific netlink handlers for @pw_type.
 * Out-of-range types are silently ignored. */
void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type)
{
	if (pw_type >= __L2TP_PWTYPE_MAX)
		return;

	genl_lock();
	l2tp_nl_cmd_ops[pw_type] = NULL;
	genl_unlock();
}
EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
/* Module init: register the l2tp generic netlink family and its
 * command table.  Returns the genetlink registration result. */
static int l2tp_nl_init(void)
{
	printk(KERN_INFO "L2TP netlink interface\n");
	return genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
					     ARRAY_SIZE(l2tp_nl_ops));
}
/* Module exit: unregister the l2tp generic netlink family. */
static void l2tp_nl_cleanup(void)
{
	genl_unregister_family(&l2tp_nl_family);
}
module_init(l2tp_nl_init);
module_exit(l2tp_nl_cleanup);
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP netlink");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
__stringify(NETLINK_GENERIC) "-type-" "l2tp");
| gpl-2.0 |
MikePach/Alucard-Kernel-jfltexx | drivers/scsi/dpt_i2o.c | 8093 | 97076 | /***************************************************************************
dpti.c - description
-------------------
begin : Thu Sep 7 2000
copyright : (C) 2000 by Adaptec
July 30, 2001 First version being submitted
for inclusion in the kernel. V2.4
See Documentation/scsi/dpti.txt for history, notes, license info
and credits
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
/***************************************************************************
* Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
- Support 2.6 kernel and DMA-mapping
- ioctl fix for raid tools
- use schedule_timeout in long long loop
**************************************************************************/
/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>
MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
////////////////////////////////////////////////////////////////
#include <linux/ioctl.h> /* For SCSI-Passthrough */
#include <asm/uaccess.h>
#include <linux/stat.h>
#include <linux/slab.h> /* for kmalloc() */
#include <linux/pci.h> /* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h> /* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <asm/processor.h> /* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h> /* for virt_to_bus, etc. */
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include "dpt/dptsig.h"
#include "dpti.h"
/*============================================================================
* Create a binary signature - this is read by dptsig
* Needed for our management apps
*============================================================================
*/
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
PROC_ALPHA, PROC_ALPHA,
#else
(-1),(-1),
#endif
FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
/*============================================================================
* Globals
*============================================================================
*/
static DEFINE_MUTEX(adpt_configuration_lock);
static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;
static adpt_hba* hba_chain = NULL;
static int hba_count = 0;
static struct class *adpt_sysfs_class;
static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif
static const struct file_operations adpt_fops = {
.unlocked_ioctl = adpt_unlocked_ioctl,
.open = adpt_open,
.release = adpt_close,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_adpt_ioctl,
#endif
.llseek = noop_llseek,
};
/* Structures and definitions for synchronous message posting.
* See adpt_i2o_post_wait() for description
* */
struct adpt_i2o_post_wait_data
{
int status;
u32 id;
adpt_wait_queue_head_t *wq;
struct adpt_i2o_post_wait_data *next;
};
static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
/*============================================================================
* Functions
*============================================================================
*/
/* True when dma_addr_t is wider than 32 bits on this build AND the HBA
 * was set up for 64-bit DMA (pHba->dma64). */
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}
/* Upper 32 bits of a DMA address (0 when dma_addr_t is 32-bit). */
static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}
/* Lower 32 bits of a DMA address. */
static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
/* Return the firmware debug "blink LED" value, or 0 when the debug
 * flag region is absent or its marker byte is not 0xbc. */
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (!host->FwDebugBLEDflag_P)
		return 0;
	if (readb(host->FwDebugBLEDflag_P) != 0xbc)
		return 0;
	return readb(host->FwDebugBLEDvalue_P);
}
/*============================================================================
* Scsi host template interface functions
*============================================================================
*/
static struct pci_device_id dptids[] = {
{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);
/*
 * Probe all Adaptec/DPT I2O controllers and bring them to OPERATIONAL
 * state: PCI scan -> activate -> build system table -> online -> read
 * LCT -> register SCSI hosts and the character control device.
 * Returns the number of controllers successfully initialized.
 */
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;
	PINFO("Detecting Adaptec I2O RAID controllers...\n");
	/* search for all Adatpec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			/* extra reference held for the life of the hba; dropped
			 * in adpt_i2o_delete_hba() */
			pci_dev_get(pDev);
		}
	}
	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;	/* saved because delete unlinks pHba */
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}
	/* Active IOPs in HOLD state */
rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;
	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	PDEBUG("HBA's in HOLD state\n");
	/* If IOP don't get online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;	/* table now stale; rebuild from scratch */
		}
	}
	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");
	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}
	/* sysfs class is optional; failure only loses the /dev nodes' class links */
	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
/*
* scsi_unregister will be called AFTER we return.
*/
/* Release one SCSI host: tear down the hba then unregister the host.
 * (Quiesce is handled inside the delete path; see comment below.) */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}
/*
 * Send a SCSI INQUIRY to the adapter itself (via an I2O private
 * SCSI_EXEC message) and fill pHba->detail with a human-readable
 * "Vendor / Model / FW" string from the 36-byte response.
 */
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;
	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);
	len = 36;
	direction = 0x00000000;
	scsidir  =0x40000000;	// DATA IN  (iop<--dev)
	/* message is 17 dwords with a 64-bit SGE, 14 with a 32-bit SGE */
	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3]  = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;
	mptr=msg+7;
	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;	/* allocation length of the INQUIRY response */
	scb[5] = 0;
	// Don't care about the rest of scb
	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}
	// Send it on it's way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		/* on timeout/interrupt the controller may still DMA into buf,
		 * so the buffer is deliberately leaked in those cases */
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
/*
 * Per-device configuration hook: pick a queueing model and depth.
 * Tagged-queueing devices get simple tags with a depth of
 * can_queue - 1 (one slot reserved); untagged devices get depth 1.
 *
 * Fix: the original computed an adpt_hba pointer from hostdata and
 * never used it; the dead local has been removed.
 */
static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}
/*
 * queuecommand implementation (locked variant, wrapped by DEF_SCSI_QCMD
 * below). Translates the midlayer command into an I2O message via
 * adpt_scsi_to_i2o(). Returns 0 on accept/immediate completion, 1 to
 * ask the midlayer to retry while the HBA is busy, FAILED on hard error.
 */
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */
	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */
	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}
	rmb();	/* pair with state updates done on other CPUs */
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need to a way to restart the scsi-cores queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		/* non-zero return => midlayer will retry this command later */
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}
	// TODO if the cmd->device if offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;
	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}
/* Generate the unlocked adpt_queue() wrapper around adpt_queue_lck(). */
static DEF_SCSI_QCMD(adpt_queue)
/*
 * Report a BIOS-compatible heads/sectors/cylinders geometry for the
 * given capacity. Geometry tiers grow with capacity; CD-ROMs get a
 * fixed fake geometry.
 * NOTE(review): sector_div() divides 'capacity' in place and returns
 * the REMAINDER, which is what ends up in 'cylinders' here — this
 * mirrors the long-standing upstream behavior; confirm before changing.
 */
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;
	// *** First lets set the default geometry ****
	// If the capacity is less than ox2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x4000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);
	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
/*
 * Return the human-readable controller description (pHba->detail,
 * filled by adpt_inquiry) for this SCSI host.
 */
static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba *hba;

	hba = (adpt_hba *) host->hostdata[0];
	return (char *) (hba->detail);
}
/*
 * Legacy /proc/scsi read handler. Writes driver/controller info and a
 * per-device table into 'buffer', honoring the old proc_scsi windowing
 * protocol: 'offset' is where the reader wants to resume, 'begin'
 * tracks the file position of buffer[0], and after each chunk we stop
 * early (pos > offset+length) or discard data entirely before the
 * window (pos <= offset). Writes (inout == TRUE) are not supported.
 */
static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;
	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}
	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */
	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;
	len  = sprintf(buffer    , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n", 
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);
	pos = begin + len;
	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the 
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len +=  sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){	/* walk the lun chain at this (chan, id) */
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;
				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}
				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline"); 
				pos = begin + len;
				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}
				d = d->next_lun;
			}
		}
	}
	/*
	 * begin is where we last checked our position with regards to offset
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';
	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}
/*
* Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
*/
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
return (u32)cmd->serial_number;
}
/*
* Go from a u32 'context' to a struct scsi_cmnd * .
* This could probably be made more efficient.
*/
/* Linear search over every device's command list for the command whose
 * serial number matches 'context'; returns NULL when not found.
 * Note the lock dance: the caller holds host_lock, which must be dropped
 * while shost_for_each_device() sleeps, and retaken before returning. */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;
	if (context == 0)
		return NULL;
	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);	/* drop ref taken by the iterator */
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);
	return NULL;
}
/*
* Turn a pointer to ioctl reply data into an u32 'context'
*/
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
return (u32)(unsigned long)reply;
#else
ulong flags = 0;
u32 nr, i;
spin_lock_irqsave(pHba->host->host_lock, flags);
nr = ARRAY_SIZE(pHba->ioctl_reply_context);
for (i = 0; i < nr; i++) {
if (pHba->ioctl_reply_context[i] == NULL) {
pHba->ioctl_reply_context[i] = reply;
break;
}
}
spin_unlock_irqrestore(pHba->host->host_lock, flags);
if (i >= nr) {
kfree (reply);
printk(KERN_WARNING"%s: Too many outstanding "
"ioctl commands\n", pHba->name);
return (u32)-1;
}
return i;
#endif
}
/*
* Go from an u32 'context' to a pointer to ioctl reply data.
*/
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
return (void *)(unsigned long)context;
#else
void *p = pHba->ioctl_reply_context[context];
pHba->ioctl_reply_context[context] = NULL;
return p;
#endif
}
/*===========================================================================
* Error Handling routines
*===========================================================================
*/
/*
 * eh_abort_handler: ask the IOP to abort one outstanding command,
 * identified by its 32-bit context. Returns SUCCESS or FAILED per the
 * SCSI error-handling contract.
 */
static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	/* never issued to the hardware -> nothing to abort */
	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0; 
	msg[4] = adpt_cmd_to_context(cmd);	/* which command to abort */
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	} 
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
/*
 * eh_device_reset_handler: send an I2O DEVICE_RESET to the target's TID.
 * DPTI_DEV_RESET is set for the duration so the queue path defers new
 * commands; the prior state is restored afterwards.
 */
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
/* eh_bus_reset_handler: send an I2O BUS_RESET to the TID of the channel
 * the failed command was on. */
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
// This version of reset is called by the eh_error_handler
/*
 * Host-reset worker invoked with host_lock held (see adpt_reset).
 * Delegates the heavy lifting to adpt_hba_reset() and maps its result
 * onto the SCSI eh SUCCESS/FAILED convention.
 */
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba *hba;
	int err;

	hba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",hba->name,cmd->device->channel,hba->channel[cmd->device->channel].tid );
	err = adpt_hba_reset(hba);
	if (err != 0) {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",hba->name, err);
		return FAILED;
	}
	printk(KERN_WARNING"%s: HBA reset complete\n",hba->name);
	return SUCCESS;
}
/* eh_host_reset_handler: take host_lock around the real reset worker. */
static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;
	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);
	return rc;
}
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
int rcode;
pHba->state |= DPTI_STATE_RESET;
// Activate does get status , init outbound, and get hrt
if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
printk(KERN_ERR "%s: Could not activate\n", pHba->name);
adpt_i2o_delete_hba(pHba);
return rcode;
}
if ((rcode=adpt_i2o_build_sys_table()) < 0) {
adpt_i2o_delete_hba(pHba);
return rcode;
}
PDEBUG("%s: in HOLD state\n",pHba->name);
if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
adpt_i2o_delete_hba(pHba);
return rcode;
}
PDEBUG("%s: in OPERATIONAL state\n",pHba->name);
if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
adpt_i2o_delete_hba(pHba);
return rcode;
}
if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
adpt_i2o_delete_hba(pHba);
return rcode;
}
pHba->state &= ~DPTI_STATE_RESET;
adpt_fail_posted_scbs(pHba);
return 0; /* return success */
}
/*===========================================================================
*
*===========================================================================
*/
/*
 * Tear down every controller and free any leftover synchronous-wait
 * records. Called on module unload and on fatal init failure; runs
 * single-threaded, hence the commented-out locking below.
 */
static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;	/* saved: delete unlinks and frees pHba */
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them 
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	 printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
/*
 * Probe and map one PCI controller: enable the device, claim its
 * regions, choose 32/64-bit DMA, ioremap the message unit BAR(s)
 * (Raptor cards split base and message windows across BAR0/BAR1),
 * allocate and chain the adpt_hba, and hook the interrupt.
 * Returns 0 on success or a negative errno.
 *
 * Fix: the 32-bit DMA-mask failure path returned without releasing
 * the PCI regions claimed above, leaking them; it now releases them
 * like every other error path past pci_request_regions().
 */
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0) {
		/* release the regions claimed above (was leaked here) */
		pci_release_regions(pDev);
		return -EINVAL;
	}

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){ 
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

        if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}
	
	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	/* append to the global controller chain under the config mutex */
	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;
	
	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;	/* message-unit register offsets */
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n", 
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
/*
 * Undo adpt_install_hba and everything built on top of it: unlink the
 * hba from the global chain, free the IRQ, unmap BARs, release PCI
 * regions, free DMA pools (HRT, LCT, status block, reply FIFO) and all
 * per-device structures. When the last controller goes away the char
 * device and sysfs class are unregistered too.
 */
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;

	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiese
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	/* unlink pHba from the singly linked hba_chain */
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
	   	iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	/* free every lun chain hanging off each (channel, id) slot */
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);	/* drop ref taken in adpt_detect() */
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	/* last controller gone: tear down the global char device + class */
	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);   
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
/*
 * Look up the adpt_device for (channel, id, lun) in the per-channel
 * device tables, walking the next_lun chain for multi-lun targets.
 * Returns NULL when the address is out of range or no device matches.
 *
 * Fixes: 'chan < 0' was always false (chan is u32) and has been
 * dropped; 'id' is now bounds-checked before indexing device[id],
 * which previously could read out of bounds for a bogus id.
 */
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if (chan >= MAX_CHANNEL)
		return NULL;
	if (id >= MAX_ID)
		return NULL;
	
	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}
/*
 * Post an I2O message and sleep until its reply arrives or 'timeout'
 * seconds elapse. A wait record is linked onto adpt_post_wait_queue and
 * its 15-bit id embedded in msg[2]; the ISR path matches the reply and
 * wakes us via adpt_i2o_post_wait_complete(). host_lock (if held by the
 * caller) is dropped across the sleep. Returns the reply status, 0 on
 * success, -ETIMEDOUT/-ETIME on timeout, or -EOPNOTSUPP.
 */
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
       // TODO we need a MORE unique way of getting ids
       // to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;	/* ids are 15-bit; they wrap */
	wait_data->id =  adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	/* high bit flags this as a post-wait context; low bits carry the id */
	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resorces is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
/*
 * Copy an I2O message into a free inbound message frame and post it.
 * Polls the post port (up to 30s) for a free frame offset, copies the
 * message into message-unit memory, then writes the offset back to the
 * post port to hand it to the IOP. The wmb()s order the frame contents
 * before the doorbell write. Returns 0 or -ETIMEDOUT.
 */
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);	/* frame offset, or EMPTY_QUEUE */
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);
		
	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
/*
 * Reply-side completion for adpt_i2o_post_wait(): find the wait record
 * whose 15-bit id matches 'context', record the status, and wake the
 * sleeper. If no record matches, the waiter already timed out and was
 * removed; log the orphaned completion for debugging.
 */
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;	/* strip the post-wait flag bit */

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
        // If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}
/*
 * Issue an I2O ADAPTER_RESET to the IOP and wait for it to complete.
 * The IOP DMAs a 1-byte completion status into a 4-byte coherent
 * buffer; on timeout that buffer is deliberately leaked because the
 * controller may still write into it later. Returns 0, -ETIMEDOUT,
 * or -ENOMEM.
 */
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)			
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);	/* drain outstanding work first */
	}

	/* grab a free inbound message frame */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);	/* give the frame back */
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	/* poll for the IOP to write a non-zero status byte */
	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicated that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to 
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
/*
 * Walk the controller's Logical Configuration Table (LCT) and build the
 * driver's view of it: track channel/id/lun high-water marks (including
 * hidden devices), register an i2o_device for each visible entry, record
 * bus-adapter channels, then populate pHba->channel[].device[] with
 * per-LUN adpt_device nodes.  Returns 0 on success, negative on error.
 */
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* table_size is in u32s: subtract the 3-word header, 9 words/entry. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				/* NOTE(review): message says "SCSI ID" but prints
				 * bus_no, not scsi_id - looks like a wrong printk
				 * argument; confirm before changing. */
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no);
				continue;
			}
			/* Track high-water marks even for hidden devices. */
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		/* Visible entry (user_tid == 0xfff): register an i2o_device. */
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}

	/* Second pass: record bus adapter / FC port channels. */
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt - but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* Target exists: append a new LUN node. */
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
/*
 * Each I2O controller keeps a chain of devices matching the useful parts
 * of its LCT.  Link a freshly discovered device at the head of that
 * doubly linked list; the configuration mutex serialises list updates.
 * Always returns 0.
 */
static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	struct i2o_device *head;

	mutex_lock(&adpt_configuration_lock);

	head = pHba->devices;
	d->controller = pHba;
	d->owner = NULL;
	d->prev = NULL;
	d->next = head;
	if (head != NULL)
		head->prev = d;
	pHba->devices = d;
	*d->dev_name = 0;	/* no name assigned yet */

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}
/*
 * Character-device open: map the minor number to an adapter on the
 * hba_chain and flag it in use.  Returns -ENXIO for unknown minors.
 */
static int adpt_open(struct inode *inode, struct file *file)
{
	adpt_hba *hba;
	int unit;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	unit = iminor(inode);
	if (unit >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

	mutex_lock(&adpt_configuration_lock);
	for (hba = hba_chain; hba != NULL; hba = hba->next) {
		if (hba->unit == unit)
			break;		/* found adapter */
	}
	if (hba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

	/* Opens are not exclusive: in_use is informational only. */
	hba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}
/*
 * Character-device release: locate the adapter for this minor and clear
 * its in-use flag.  Returns -ENXIO for unknown minors.
 */
static int adpt_close(struct inode *inode, struct file *file)
{
	adpt_hba *hba;
	int unit = iminor(inode);

	if (unit >= hba_count)
		return -ENXIO;

	mutex_lock(&adpt_configuration_lock);
	for (hba = hba_chain; hba != NULL; hba = hba->next) {
		if (hba->unit == unit)
			break;		/* found adapter */
	}
	mutex_unlock(&adpt_configuration_lock);

	if (hba == NULL)
		return -ENXIO;

	hba->in_use = 0;
	return 0;
}
/*
 * Pass an arbitrary user-space I2O message through to the controller
 * (I2OUSRCMD).  The user frame is copied in, its scatter/gather entries
 * are shadowed with kernel DMA buffers, the message is posted
 * synchronously, and SG data plus the reply frame are copied back out.
 *
 * Fix: the kzalloc'd reply buffer used to leak on the -EBUSY path when
 * no ioctl context slot was available; it is now freed there.
 *
 * Returns 0 on success or a negative errno.
 */
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		/* No free context slot: release the reply buffer we just
		 * allocated (this used to leak on every -EBUSY return). */
		kfree(reply);
		return -EBUSY;
	}

	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			/* Only simple 32-bit SG elements are supported here. */
			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	/* Post synchronously; retry forever on -ETIMEDOUT. */
	do {
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		// This state stops any new commands from enterring the
		// controller while processing the ioctl
//		pHba->state |= DPTI_STATE_IOCTL;
//		We can't set this now - The scsi subsystem sets host_blocked and
//		the queue empties and stops.  We need a way to restart the queue
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
//		pHba->state &= ~DPTI_STATE_IOCTL;
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
	} while(rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}

cleanup:
	/* On -ETIME/-EINTR the controller may still DMA into these buffers,
	 * so they intentionally cannot be freed here. */
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		kfree (reply);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}
	return rcode;
}
#if defined __ia64__
/* Report the host CPU family in the DPT management system-info block. */
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif
#if defined __sparc__
/* Report the host CPU family in the DPT management system-info block. */
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
/* Report the host CPU family in the DPT management system-info block. */
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif
#if defined __i386__
/* Report the host CPU family in the DPT management system-info block. */
static void adpt_i386_info(sysInfo_S* si)
{
	/* Map the boot CPU family onto the DPT management constants;
	 * anything other than a 386 or 486 is reported as a Pentium. */
	if (boot_cpu_data.x86 == CPU_386)
		si->processorType = PROC_386;
	else if (boot_cpu_data.x86 == CPU_486)
		si->processorType = PROC_486;
	else
		si->processorType = PROC_PENTIUM;
}
#endif
/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we can not get from kernel services.  Fills a sysInfo_S
 * block with fixed OS/bus identifiers plus the CPU family and copies it to
 * the user buffer.  Returns 0 or -EFAULT. */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

	/* Pick the processor type via the matching per-arch helper. */
#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;	/* unknown architecture */
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
/*
 * Character-device ioctl dispatcher for the DPT management interface.
 * Resolves the HBA from the minor number, waits out any reset in
 * progress, then services the DPT/I2O management commands.
 */
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	/* Look up the adapter for this minor under the config lock. */
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	/* Sleep-poll until any in-flight reset completes.  NOTE(review):
	 * the (volatile u32) cast is a legacy idiom forcing a fresh read
	 * each iteration. */
	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases:
	case DPT_SIGNATURE:
		/* Hand the driver signature block back to the tool. */
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		/* Raw I2O message pass-through. */
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		/* Reset the adapter under the host lock (if registered). */
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
/*
 * Unlocked ioctl entry point: serialise through adpt_mutex and forward
 * to the legacy adpt_ioctl() handler.
 */
static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *node = file->f_dentry->d_inode;
	long rc;

	mutex_lock(&adpt_mutex);
	rc = adpt_ioctl(node, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return rc;
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point.  Only the known DPT/I2O management
 * commands are forwarded; anything else gets -ENOIOCTLCMD.
 */
static long compat_adpt_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct inode *node = file->f_dentry->d_inode;
	long rc = -ENOIOCTLCMD;

	mutex_lock(&adpt_mutex);
	switch (cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		rc = adpt_ioctl(node, file, cmd, arg);
		break;
	default:
		break;
	}
	mutex_unlock(&adpt_mutex);

	return rc;
}
#endif
/*
 * Interrupt handler: drain the outbound reply FIFO.  Each reply is routed
 * by its initiator context - bit 30 marks ioctl pass-through replies,
 * bit 31 marks post-wait messages, everything else is a SCSI command
 * completion.  Each frame is returned to the IOP via the reply port.
 */
static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		/* Translate the bus address of the frame to a CPU pointer,
		 * preferring the driver's own reply pool. */
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){ // IOCTL
			/* Copy the reply into the pass-through buffer that
			 * adpt_i2o_passthru registered for this context. */
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status  >> 24){
				status &=  0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				cmd = adpt_cmd_from_context(pHba,
							readl(reply+12));
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = adpt_cmd_from_context (pHba, readl(reply+12));
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				if(cmd->serial_number != 0) { // If not timedout
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		writel(m, pHba->reply_port);	/* hand the frame back */
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
/*
 * Build and post an I2O_CMD_SCSI_EXEC message for a midlayer SCSI
 * command.  The 16-byte CDB is embedded in the frame, followed by a
 * scatter/gather list (64-bit SG elements when the controller supports
 * 64-bit DMA).  Returns 0 on success or the post error code.
 */
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note:  Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 	0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0,  16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on it's way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
/*
 * Allocate a Scsi_Host for this HBA and seed its limits from the
 * controller's capabilities.  Returns 0 on success, -1 on failure.
 */
static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *shost = scsi_host_alloc(sht, sizeof(adpt_hba*));

	if (shost == NULL) {
		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}

	/* Cross-link host and HBA. */
	shost->hostdata[0] = (unsigned long)pHba;
	pHba->host = shost;

	shost->irq = pHba->pDev->irq;
	/* no IO ports, so don't have to set host->io_port and
	 * host->n_io_port
	 */
	shost->io_port = 0;
	shost->n_io_port = 0;
	/* see comments in scsi_host.h */
	shost->max_id = 16;
	shost->max_lun = 256;
	shost->max_channel = pHba->top_scsi_channel + 1;
	shost->cmd_per_lun = 1;
	shost->unique_id = (u32)sys_tbl_pa + pHba->unit;
	shost->sg_tablesize = pHba->sg_tablesize;
	shost->can_queue = pHba->post_fifo_size;

	return 0;
}
/*
 * Translate an I2O SCSI reply frame into midlayer status on the
 * scsi_cmnd: compute the residual, map the detailed status to a DID_*
 * result, copy back sense data on CHECK CONDITION, then complete the
 * command.  Returns cmd->result.
 */
static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	// calculate resid for sg
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		/* Everything else (and unknown codes) maps to DID_ERROR. */
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);

			}
		}
	} else {
		/* In this condtion we could not talk to the tid
		 * the card rejected it.  We should signal a retry
		 * for a limitted number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}
/*
 * Rescan the adapter: fetch a fresh LCT and reconcile it with the
 * driver's device tables, all under the host lock (when registered).
 * Returns 0 on success or the first failing step's error code.
 */
static s32 adpt_rescan(adpt_hba* pHba)
{
	s32 rc = 0;
	ulong flags = 0;

	if (pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	rc = adpt_i2o_lct_get(pHba);
	if (rc >= 0) {
		rc = adpt_i2o_reparse_lct(pHba);
		if (rc >= 0)
			rc = 0;
	}

	if (pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);

	return rc;
}
/*
 * Reconcile a freshly fetched LCT with the driver's device tables:
 * mark every known device unscanned, walk the new LCT adding devices
 * that appeared and marking survivors online, then take offline any
 * device that was not seen this pass.  Returns 0 or a negative errno.
 */
static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	/* table_size is in u32s: 3-word header, 9 words per entry. */
	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;	/* hidden device - skip */
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					/* Target exists: append a new LUN node. */
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up it's mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}
/*
 * Complete every in-flight command on the adapter with QUEUE_FULL status
 * so the midlayer will retry them.
 */
static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_device *sdev = NULL;
	struct scsi_cmnd *scb = NULL;

	shost_for_each_device(sdev, pHba->host) {
		unsigned long lock_flags;

		spin_lock_irqsave(&sdev->list_lock, lock_flags);
		list_for_each_entry(scb, &sdev->cmd_list, list) {
			/* serial_number == 0 means not posted; skip it. */
			if (scb->serial_number == 0)
				continue;
			scb->result = (DID_OK << 16) | (QUEUE_FULL <<1);
			scb->scsi_done(scb);
		}
		spin_unlock_irqrestore(&sdev->list_lock, lock_flags);
	}
}
/*============================================================================
* Routines from i2o subsystem
*============================================================================
*/
/*
 * Bring an I2O controller into HOLD state.  See the spec.
 * An already-initialized board is reset only when its status cannot be
 * read or its IOP state requires it; a fresh board is always reset.
 * The outbound queue is then set up and the HRT fetched, leaving the
 * IOP in HOLD.  Returns 0 on success, negative on failure.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		if (adpt_i2o_status_get(pHba) < 0) {
			/* Status unreadable: force a reset, then re-check. */
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		/* Any of these states means the IOP must be reset back to
		 * ADAPTER_STATE_RESET before we can proceed. */
		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}
/*
 * Bring a controller online into OPERATIONAL state: send the system
 * table (leaving the IOP in READY), then enable the HBA.  On any
 * failure the HBA is torn down and -1 is returned.
 */
static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) >= 0 &&	/* now in READY state */
	    adpt_i2o_enable_hba(pHba) >= 0)	/* now OPERATIONAL */
		return 0;

	adpt_i2o_delete_hba(pHba);
	return -1;
}
/*
 * Post a UtilNOP message in inbound frame @m, handing the frame back to
 * the IOP. If @m is EMPTY_QUEUE, poll the post port (up to 5 s) for a
 * free frame first. Returns 0 on success, 2 on timeout.
 */
static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();		/* force a fresh read of the post port */
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();		/* frame contents must reach the IOP before posting */
	writel(m, pHba->post_port);
	wmb();
	return 0;
}
/*
 * Initialize the IOP's outbound (reply) queue: send ExecOutboundInit,
 * wait for completion, then allocate the reply pool and post every
 * reply frame's bus address to the controller.
 * Returns 0 on success, a negative errno or -2 on failure.
 */
static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	/* Grab a free inbound message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	/* 4-byte DMA buffer the IOP updates with its init progress. */
	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);	/* give the frame back to the IOP */
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);	/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	/* Replace any previous reply pool with one sized for this IOP. */
	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	/* Hand each reply frame's bus address to the IOP. */
	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
/*
 * I2O System Table. Contains information about
 * all the IOPs in the system. Used to inform IOPs
 * about each other's existence.
 *
 * sys_tbl_ver is the CurrentChangeIndicator that is
 * used by IOPs to track changes.
 *
 * NOTE: this comment describes the system table built by
 * adpt_i2o_build_sys_table() further below, not the adjacent
 * adpt_i2o_status_get().
 */
/*
 * Fetch the IOP status block with ExecStatusGet and derive the message
 * FIFO sizes and scatter/gather limits from it. The status block DMA
 * buffer is allocated on first use and reused on later calls.
 * Returns 0 on success or a negative errno.
 */
static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory. \n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;

	/* Grab a free inbound message frame. */
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	/* Completion is detected when the last byte of the status block
	 * turns 0xff (written by the IOP via DMA). */
	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	if (dpt_dma64(pHba)) {
		/* 64-bit SG elements carry an extra word each and the
		 * message header is two words larger. */
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 14 * sizeof(u32))
		     / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 12 * sizeof(u32))
		     / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}

#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}
/*
 * Get the IOP's Logical Configuration Table. The buffer is grown and
 * the request retried when the IOP reports a larger table than we
 * allocated. On success this also maps the firmware debug buffer
 * advertised via scalar group 0x8000.
 * Returns 0 on success, -ENOMEM or the post-wait status on failure.
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_ATOMIC);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		/* table_size is in 32-bit words. If our buffer was too
		 * small, free it with the size it was ALLOCATED with
		 * before growing lct_size (the previous code updated
		 * lct_size first and then freed with the wrong size --
		 * same pattern as adpt_i2o_hrt_get below). */
		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			int newsize = pHba->lct->table_size << 2;
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					pHba->lct, pHba->lct_pa);
			pHba->lct = NULL;
			pHba->lct_size = newsize;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);

	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
							FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
							FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
							FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}
/*
 * Build the I2O system table describing every registered HBA and place
 * it in a DMA-coherent buffer (sys_tbl/sys_tbl_pa) so it can later be
 * pushed to each IOP with adpt_i2o_systab_send().
 */
static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	/* Drop any table built on a previous pass. */
	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			/* Unresponsive HBA: shrink the entry count and
			 * leave it out of the table. */
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		/* presumably the inbound post port address -- verify
		 * against the register map used elsewhere */
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);
		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}
/*
 * Dump the information block associated with a given unit (TID).
 * Queries vendor/device/revision strings from scalar group 0xF100 and
 * prints them on one KERN_INFO line; DEBUG builds add class and flag
 * details.
 */
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];	/* oversized so the manual NUL stores below are safe */
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;	/* returned string may be unterminated */
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	 printk(KERN_INFO "\tFlags: ");

	 if(d->lct_data.device_flags&(1<<0))
		  printk("C");	     // ConfigDialog requested
	 if(d->lct_data.device_flags&(1<<1))
		  printk("U");	     // Multi-user capable
	 if(!(d->lct_data.device_flags&(1<<4)))
		  printk("P");	     // Peer service enabled!
	 if(!(d->lct_data.device_flags&(1<<5)))
		  printk("M");	     // Mgmt service enabled!
	 printk("\n");
#endif
}
#ifdef DEBUG
/*
* Do i2o class name lookup
*/
/*
 * Map an I2O class code (low 12 bits) to a human-readable name.
 * DEBUG builds only.
 */
static const char *adpt_i2o_get_class_name(int class)
{
	/* Codes and names are kept in matching order; the final name is
	 * the fallback for unrecognized codes. */
	static const int i2o_class_code[] = {
		I2O_CLASS_EXECUTIVE,
		I2O_CLASS_DDM,
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		I2O_CLASS_SEQUENTIAL_STORAGE,
		I2O_CLASS_LAN,
		I2O_CLASS_WAN,
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		I2O_CLASS_SCSI_PERIPHERAL,
		I2O_CLASS_ATE_PORT,
		I2O_CLASS_ATE_PERIPHERAL,
		I2O_CLASS_FLOPPY_CONTROLLER,
		I2O_CLASS_FLOPPY_DEVICE,
		I2O_CLASS_BUS_ADAPTER_PORT,
		I2O_CLASS_PEER_TRANSPORT_AGENT,
		I2O_CLASS_PEER_TRANSPORT,
	};
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if ((class & 0xFFF) == i2o_class_code[idx])
			return i2o_class_name[idx];
	}

	return i2o_class_name[16];	/* "Unknown" */
}
#endif
/*
 * Read the Hardware Resource Table from the IOP, growing the buffer
 * and retrying when the IOP reports more entries than fit.
 * Returns 0 on success, -ENOMEM or the post-wait status on failure.
 */
static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);	/* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		/* num_entries * entry_len is in 32-bit words; free with
		 * the old size, then retry with the larger one. */
		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}
/*
 * Query one scalar group value or a whole scalar group.
 *
 * @pHba:   adapter to query
 * @tid:    target device TID
 * @group:  scalar group number
 * @field:  field index within the group, or -1 for the whole group
 * @buf:    caller buffer receiving the payload (8-byte header stripped)
 * @buflen: size of @buf in bytes
 *
 * Returns @buflen on success, -ENOMEM / -ETIME / -EINTR or the
 * adpt_i2o_issue_params() status on failure.
 *
 * Reworked with goto-based cleanup so the result-block free appears
 * exactly once (the original duplicated it on three error paths); also
 * fixes the "operatio" typo in the out-of-memory message.
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;
	int size;

	/* 8 bytes of result header (ResultCount, ErrorInfoSize,
	 * BlockStatus, BlockSize) precede the payload. */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		size = -ENOMEM;
		goto free_resblk;
	}
	if (field == -1)		/* whole group */
		opblk[4] = -1;
	memcpy(opblk_va, opblk, sizeof(opblk));

	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n",
			pHba->name);
		goto free_resblk;
	} else if (size == -EINTR) {
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n",
			pHba->name);
		goto free_resblk;
	}

	memcpy(buf, resblk_va+8, buflen);	/* cut off header */

free_resblk:
	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}
/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 *
 * Returns the number of bytes used in resblk on success, or a negative
 * post-wait status / negated BlockStatus on failure.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
			void *opblk_va,  dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1]&0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
	}

	/* BlockSize (low 16 bits of res[1]) is in 32-bit words; add the
	 * 4-byte ResultCount/ErrorInfoSize word for the byte total. */
	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
}
/*
 * Ask the IOP to stop external operation (SysQuiesce). The request is
 * skipped (and 0 returned) when the IOP is not in a state that accepts
 * it.
 */
static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int rc = 0;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
	if ((pHba->status_block->iop_state == ADAPTER_STATE_READY) ||
	    (pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)) {
		msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
		msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;

		rc = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240);
		if (rc) {
			printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -rc);
		} else {
			printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
		}

		adpt_i2o_status_get(pHba);
	}

	return rc;
}
/*
 * Enable IOP. Allows the IOP to resume external operations.
 * Only legal from the READY state; a no-op when already OPERATIONAL.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4] = {
		FOUR_WORD_MSG_SIZE|SGL_OFFSET_0,
		I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID,
		0,
		0,
	};
	int rc;

	adpt_i2o_status_get(pHba);
	if (!pHba->status_block)
		return -ENOMEM;

	/* Enable only allowed on READY state */
	switch (pHba->status_block->iop_state) {
	case ADAPTER_STATE_OPERATIONAL:
		return 0;	/* already enabled */
	case ADAPTER_STATE_READY:
		break;
	default:
		return -EINVAL;
	}

	rc = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240);
	if (rc)
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, rc);
	else
		PDEBUG("%s: Enabled.\n", pHba->name);

	adpt_i2o_status_get(pHba);
	return rc;
}
/*
 * Send the previously built system table (sys_tbl/sys_tbl_pa) to the
 * IOP with ExecSysTabSet. Returns the post-wait status (0 on success).
 */
static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;	/* empty private memory declaration */
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;	/* empty private i/o declaration */
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}
/*============================================================================
*
*============================================================================
*/
#ifdef UARTDELAY
/*
 * Busy-wait for roughly @millisec milliseconds. Only compiled when the
 * UARTDELAY debug option is enabled.
 *
 * Fix: the declaration read "static static void" -- a duplicate
 * storage-class specifier, which is a compile error under -Werror and
 * at best a constraint violation.
 */
static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
#endif
/* SCSI host template for dpt_i2o; the adpt_* entry points are defined
 * earlier in this file. */
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.proc_info		= adpt_proc_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	/* inbound FIFO depth caps the queue; see adpt_i2o_status_get() */
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
/*
 * Module load: detect all adapters, then register each with the SCSI
 * midlayer and scan it. On a registration failure, unregister only the
 * hosts that were actually added and return the error.
 */
static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *failed;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	/* Fix: only unregister hosts that scsi_add_host() accepted.
	 * The previous code walked the whole chain and called
	 * scsi_remove_host() on hosts that were never added --
	 * including the one whose registration just failed. */
	failed = pHba;
	for (pHba = hba_chain; pHba != failed; pHba = pHba->next)
		scsi_remove_host(pHba->host);
	return error;
}
/*
 * Module unload: unregister every host from the SCSI midlayer first,
 * then release per-adapter resources in a second pass.
 */
static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = pHba->next)
		scsi_remove_host(pHba->host);
	/* Fetch ->next before adpt_release(), which presumably tears
	 * down pHba itself (hence the saved pointer). */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba->host);
	}
}
module_init(adpt_init);
module_exit(adpt_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
msva/android_kernel_asus_A80 | drivers/ssb/sdio.c | 9117 | 15971 | /*
* Sonics Silicon Backplane
* SDIO-Hostbus related functions
*
* Copyright 2009 Albert Herranz <albert_herranz@yahoo.es>
*
* Based on drivers/ssb/pcmcia.c
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2007-2008 Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*
*/
#include <linux/ssb/ssb.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#include <linux/mmc/sdio_func.h>
#include "ssb_private.h"
/* Define the following to 1 to enable a printk on each coreswitch. */
#define SSB_VERBOSE_SDIOCORESWITCH_DEBUG 0
/* Hardware invariants CIS tuples */
#define SSB_SDIO_CIS 0x80
#define SSB_SDIO_CIS_SROMREV 0x00
#define SSB_SDIO_CIS_ID 0x01
#define SSB_SDIO_CIS_BOARDREV 0x02
#define SSB_SDIO_CIS_PA 0x03
#define SSB_SDIO_CIS_PA_PA0B0_LO 0
#define SSB_SDIO_CIS_PA_PA0B0_HI 1
#define SSB_SDIO_CIS_PA_PA0B1_LO 2
#define SSB_SDIO_CIS_PA_PA0B1_HI 3
#define SSB_SDIO_CIS_PA_PA0B2_LO 4
#define SSB_SDIO_CIS_PA_PA0B2_HI 5
#define SSB_SDIO_CIS_PA_ITSSI 6
#define SSB_SDIO_CIS_PA_MAXPOW 7
#define SSB_SDIO_CIS_OEMNAME 0x04
#define SSB_SDIO_CIS_CCODE 0x05
#define SSB_SDIO_CIS_ANTENNA 0x06
#define SSB_SDIO_CIS_ANTGAIN 0x07
#define SSB_SDIO_CIS_BFLAGS 0x08
#define SSB_SDIO_CIS_LEDS 0x09
#define CISTPL_FUNCE_LAN_NODE_ID 0x04 /* same as in PCMCIA */
/*
* Function 1 miscellaneous registers.
*
* Definitions match src/include/sbsdio.h from the
* Android Open Source Project
* http://android.git.kernel.org/?p=platform/system/wlan/broadcom.git
*
*/
#define SBSDIO_FUNC1_SBADDRLOW 0x1000a /* SB Address window Low (b15) */
#define SBSDIO_FUNC1_SBADDRMID 0x1000b /* SB Address window Mid (b23-b16) */
#define SBSDIO_FUNC1_SBADDRHIGH 0x1000c /* SB Address window High (b24-b31) */
/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
#define SBSDIO_SBADDRLOW_MASK 0x80 /* Valid address bits in SBADDRLOW */
#define SBSDIO_SBADDRMID_MASK 0xff /* Valid address bits in SBADDRMID */
#define SBSDIO_SBADDRHIGH_MASK 0xff /* Valid address bits in SBADDRHIGH */
#define SBSDIO_SB_OFT_ADDR_MASK 0x7FFF /* sb offset addr is <= 15 bits, 32k */
/* REVISIT: this flag doesn't seem to matter */
#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x8000 /* forces 32-bit SB access */
/*
* Address map within the SDIO function address space (128K).
*
* Start End Description
* ------- ------- ------------------------------------------
* 0x00000 0x0ffff selected backplane address window (64K)
* 0x10000 0x1ffff backplane control registers (max 64K)
*
* The current address window is configured by writing to registers
* SBADDRLOW, SBADDRMID and SBADDRHIGH.
*
* In order to access the contents of a 32-bit Silicon Backplane address
* the backplane address window must be first loaded with the highest
* 16 bits of the target address. Then, an access must be done to the
* SDIO function address space using the lower 15 bits of the address.
* Bit 15 of the address must be set when doing 32 bit accesses.
*
* 10987654321098765432109876543210
* WWWWWWWWWWWWWWWWW SB Address Window
* OOOOOOOOOOOOOOOO Offset within SB Address Window
* a 32-bit access flag
*/
/*
* SSB I/O via SDIO.
*
* NOTE: SDIO address @addr is 17 bits long (SDIO address space is 128K).
*/
static inline struct device *ssb_sdio_dev(struct ssb_bus *bus)
{
return &bus->host_sdio->dev;
}
/* Write one byte to the SDIO function; host must already be claimed.
 * Returns 0 on success or the sdio error code. */
static int ssb_sdio_writeb(struct ssb_bus *bus, unsigned int addr, u8 val)
{
	int err = 0;

	sdio_writeb(bus->host_sdio, val, addr, &err);
	if (unlikely(err))
		dev_dbg(ssb_sdio_dev(bus), "%08X <- %02x, error %d\n",
			addr, val, err);

	return err;
}
#if 0
/* Byte-read counterpart of ssb_sdio_writeb(); host must already be
 * claimed. Currently compiled out via the surrounding "#if 0". */
static u8 ssb_sdio_readb(struct ssb_bus *bus, unsigned int addr)
{
	u8 val;
	int error = 0;

	val = sdio_readb(bus->host_sdio, addr, &error);
	if (unlikely(error)) {
		dev_dbg(ssb_sdio_dev(bus), "%08X -> %02x, error %d\n",
			addr, val, error);
	}

	return val;
}
#endif
/* Program the 32-bit backplane address window (host already claimed).
 * bus->sdio_sbaddr is only updated when all three register writes
 * succeed. Returns 0 on success or the first write's error code. */
static int ssb_sdio_set_sbaddr_window(struct ssb_bus *bus, u32 address)
{
	int err;

	err = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRLOW,
			      (address >> 8) & SBSDIO_SBADDRLOW_MASK);
	if (!err)
		err = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRMID,
				      (address >> 16) & SBSDIO_SBADDRMID_MASK);
	if (!err)
		err = ssb_sdio_writeb(bus, SBSDIO_FUNC1_SBADDRHIGH,
				      (address >> 24) & SBSDIO_SBADDRHIGH_MASK);
	if (err) {
		dev_dbg(ssb_sdio_dev(bus), "failed to set address window"
			" to 0x%08x, error %d\n", address, err);
		return err;
	}

	bus->sdio_sbaddr = address;
	return 0;
}
/* for enumeration use only */
u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset)
{
	int err;
	u32 val;

	sdio_claim_host(bus->host_sdio);
	val = sdio_readl(bus->host_sdio, offset, &err);
	sdio_release_host(bus->host_sdio);
	if (unlikely(err))
		dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n",
			bus->sdio_sbaddr >> 16, offset, val, err);

	return val;
}
/* for enumeration use only */
int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
{
	u32 sbaddr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE;
	int err;

	sdio_claim_host(bus->host_sdio);
	err = ssb_sdio_set_sbaddr_window(bus, sbaddr);
	sdio_release_host(bus->host_sdio);
	if (err)
		dev_err(ssb_sdio_dev(bus), "failed to switch to core %u,"
			" error %d\n", coreidx, err);

	return err;
}
/* Host must be already claimed. Retargets the backplane window at the
 * given core unless it already points there. */
int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev)
{
	u8 coreidx = dev->core_index;
	u32 sbaddr = (coreidx * SSB_CORE_SIZE) + SSB_ENUM_BASE;
	int err;

	if (likely(bus->sdio_sbaddr == sbaddr))
		return 0;	/* window already covers this core */

#if SSB_VERBOSE_SDIOCORESWITCH_DEBUG
	dev_info(ssb_sdio_dev(bus),
		 "switching to %s core, index %d\n",
		 ssb_core_name(dev->id.coreid), coreidx);
#endif
	err = ssb_sdio_set_sbaddr_window(bus, sbaddr);
	if (err) {
		dev_dbg(ssb_sdio_dev(bus), "failed to switch to"
			" core %u, error %d\n", coreidx, err);
		return err;
	}
	bus->mapped_device = dev;

	return 0;
}
static u8 ssb_sdio_read8(struct ssb_device *dev, u16 offset)
{
	struct ssb_bus *bus = dev->bus;
	int err = 0;
	u8 val = 0xff;	/* returned unchanged if the core switch fails */

	sdio_claim_host(bus->host_sdio);
	if (unlikely(ssb_sdio_switch_core(bus, dev)))
		goto release;
	/* Merge the window's low bits into the 15-bit function offset. */
	offset = (offset | (bus->sdio_sbaddr & 0xffff)) & SBSDIO_SB_OFT_ADDR_MASK;
	val = sdio_readb(bus->host_sdio, offset, &err);
	if (err)
		dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %02x, error %d\n",
			bus->sdio_sbaddr >> 16, offset, val, err);
release:
	sdio_release_host(bus->host_sdio);
	return val;
}
static u16 ssb_sdio_read16(struct ssb_device *dev, u16 offset)
{
	struct ssb_bus *bus = dev->bus;
	int err = 0;
	u16 val = 0xffff;	/* returned unchanged if the core switch fails */

	sdio_claim_host(bus->host_sdio);
	if (unlikely(ssb_sdio_switch_core(bus, dev)))
		goto release;
	/* Merge the window's low bits into the 15-bit function offset. */
	offset = (offset | (bus->sdio_sbaddr & 0xffff)) & SBSDIO_SB_OFT_ADDR_MASK;
	val = sdio_readw(bus->host_sdio, offset, &err);
	if (err)
		dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %04x, error %d\n",
			bus->sdio_sbaddr >> 16, offset, val, err);
release:
	sdio_release_host(bus->host_sdio);
	return val;
}
static u32 ssb_sdio_read32(struct ssb_device *dev, u16 offset)
{
	struct ssb_bus *bus = dev->bus;
	int err = 0;
	u32 val = 0xffffffff;	/* returned unchanged if the core switch fails */

	sdio_claim_host(bus->host_sdio);
	if (unlikely(ssb_sdio_switch_core(bus, dev)))
		goto release;
	/* Merge the window's low bits into the offset and set the
	 * 32-bit access flag. */
	offset = ((offset | (bus->sdio_sbaddr & 0xffff)) &
		  SBSDIO_SB_OFT_ADDR_MASK) | SBSDIO_SB_ACCESS_2_4B_FLAG;
	val = sdio_readl(bus->host_sdio, offset, &err);
	if (err)
		dev_dbg(ssb_sdio_dev(bus), "%04X:%04X > %08x, error %d\n",
			bus->sdio_sbaddr >> 16, offset, val, err);
release:
	sdio_release_host(bus->host_sdio);
	return val;
}
#ifdef CONFIG_SSB_BLOCKIO
/* Read @count bytes from a core register window into @buffer using
 * SDIO block transfers; @reg_width selects the register access width
 * and its alignment requirement. On failure the buffer is filled with
 * 0xff (matching what a failed single-register read returns). */
static void ssb_sdio_block_read(struct ssb_device *dev, void *buffer,
				  size_t count, u16 offset, u8 reg_width)
{
	size_t saved_count = count;	/* kept for the error message only */
	struct ssb_bus *bus = dev->bus;
	int error = 0;

	sdio_claim_host(bus->host_sdio);
	if (unlikely(ssb_sdio_switch_core(bus, dev))) {
		error = -EIO;
		memset(buffer, 0xff, count);
		goto err_out;
	}
	offset |= bus->sdio_sbaddr & 0xffff;	/* window low bits */
	offset &= SBSDIO_SB_OFT_ADDR_MASK;
	switch (reg_width) {
	case sizeof(u8): {
		error = sdio_readsb(bus->host_sdio, buffer, offset, count);
		break;
	}
	case sizeof(u16): {
		SSB_WARN_ON(count & 1);		/* must be 16-bit aligned */
		error = sdio_readsb(bus->host_sdio, buffer, offset, count);
		break;
	}
	case sizeof(u32): {
		SSB_WARN_ON(count & 3);		/* must be 32-bit aligned */
		offset |= SBSDIO_SB_ACCESS_2_4B_FLAG;	/* 32 bit data access */
		error = sdio_readsb(bus->host_sdio, buffer, offset, count);
		break;
	}
	default:
		SSB_WARN_ON(1);
	}
	if (!error)
		goto out;

err_out:
	dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%zu), error %d\n",
		bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error);
out:
	sdio_release_host(bus->host_sdio);
}
#endif /* CONFIG_SSB_BLOCKIO */
static void ssb_sdio_write8(struct ssb_device *dev, u16 offset, u8 val)
{
	struct ssb_bus *bus = dev->bus;
	int err = 0;

	sdio_claim_host(bus->host_sdio);
	if (unlikely(ssb_sdio_switch_core(bus, dev)))
		goto release;
	/* Merge the window's low bits into the 15-bit function offset. */
	offset = (offset | (bus->sdio_sbaddr & 0xffff)) & SBSDIO_SB_OFT_ADDR_MASK;
	sdio_writeb(bus->host_sdio, val, offset, &err);
	if (err)
		dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %02x, error %d\n",
			bus->sdio_sbaddr >> 16, offset, val, err);
release:
	sdio_release_host(bus->host_sdio);
}
static void ssb_sdio_write16(struct ssb_device *dev, u16 offset, u16 val)
{
	struct ssb_bus *bus = dev->bus;
	int err = 0;

	sdio_claim_host(bus->host_sdio);
	if (unlikely(ssb_sdio_switch_core(bus, dev)))
		goto release;
	/* Merge the window's low bits into the 15-bit function offset. */
	offset = (offset | (bus->sdio_sbaddr & 0xffff)) & SBSDIO_SB_OFT_ADDR_MASK;
	sdio_writew(bus->host_sdio, val, offset, &err);
	if (err)
		dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %04x, error %d\n",
			bus->sdio_sbaddr >> 16, offset, val, err);
release:
	sdio_release_host(bus->host_sdio);
}
static void ssb_sdio_write32(struct ssb_device *dev, u16 offset, u32 val)
{
	struct ssb_bus *bus = dev->bus;
	int err = 0;

	sdio_claim_host(bus->host_sdio);
	if (unlikely(ssb_sdio_switch_core(bus, dev)))
		goto release;
	/* Merge the window's low bits into the offset and set the
	 * 32-bit access flag. */
	offset = ((offset | (bus->sdio_sbaddr & 0xffff)) &
		  SBSDIO_SB_OFT_ADDR_MASK) | SBSDIO_SB_ACCESS_2_4B_FLAG;
	sdio_writel(bus->host_sdio, val, offset, &err);
	if (err)
		dev_dbg(ssb_sdio_dev(bus), "%04X:%04X < %08x, error %d\n",
			bus->sdio_sbaddr >> 16, offset, val, err);
	/* Quirk: issue a dummy read after a 32-bit write. */
	if (bus->quirks & SSB_QUIRK_SDIO_READ_AFTER_WRITE32)
		sdio_readl(bus->host_sdio, 0, &err);
release:
	sdio_release_host(bus->host_sdio);
}
#ifdef CONFIG_SSB_BLOCKIO
/* Write @count bytes from @buffer to a core register window using SDIO
 * block transfers; @reg_width selects the register access width and
 * its alignment requirement. */
static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
				   size_t count, u16 offset, u8 reg_width)
{
	size_t saved_count = count;	/* kept for the error message only */
	struct ssb_bus *bus = dev->bus;
	int error = 0;

	sdio_claim_host(bus->host_sdio);
	if (unlikely(ssb_sdio_switch_core(bus, dev))) {
		error = -EIO;
		/* NOTE(review): this writes 0xff through a const-
		 * qualified source buffer -- presumably copied from the
		 * block-read error path, where filling makes sense.
		 * Clobbering the caller's data on a failed *write*
		 * looks wrong; confirm before changing. */
		memset((void *)buffer, 0xff, count);
		goto err_out;
	}
	offset |= bus->sdio_sbaddr & 0xffff;	/* window low bits */
	offset &= SBSDIO_SB_OFT_ADDR_MASK;
	switch (reg_width) {
	case sizeof(u8):
		error = sdio_writesb(bus->host_sdio, offset,
				     (void *)buffer, count);
		break;
	case sizeof(u16):
		SSB_WARN_ON(count & 1);		/* must be 16-bit aligned */
		error = sdio_writesb(bus->host_sdio, offset,
				     (void *)buffer, count);
		break;
	case sizeof(u32):
		SSB_WARN_ON(count & 3);		/* must be 32-bit aligned */
		offset |= SBSDIO_SB_ACCESS_2_4B_FLAG;	/* 32 bit data access */
		error = sdio_writesb(bus->host_sdio, offset,
				     (void *)buffer, count);
		break;
	default:
		SSB_WARN_ON(1);
	}
	if (!error)
		goto out;

err_out:
	dev_dbg(ssb_sdio_dev(bus), "%04X:%04X (width=%u, len=%zu), error %d\n",
		bus->sdio_sbaddr >> 16, offset, reg_width, saved_count, error);
out:
	sdio_release_host(bus->host_sdio);
}
#endif /* CONFIG_SSB_BLOCKIO */
/* SSB bus operations backed by the SDIO accessors above.
 * Not "static", as it's used in main.c. */
const struct ssb_bus_ops ssb_sdio_ops = {
	.read8		= ssb_sdio_read8,
	.read16		= ssb_sdio_read16,
	.read32		= ssb_sdio_read32,
	.write8		= ssb_sdio_write8,
	.write16	= ssb_sdio_write16,
	.write32	= ssb_sdio_write32,
#ifdef CONFIG_SSB_BLOCKIO
	.block_read	= ssb_sdio_block_read,
	.block_write	= ssb_sdio_block_write,
#endif
};
#define GOTO_ERROR_ON(condition, description) do { \
if (unlikely(condition)) { \
error_description = description; \
goto error; \
} \
} while (0)
/**
 * ssb_sdio_get_invariants - populate SPROM/board data from the SDIO CIS
 * @bus: SSB bus whose host SDIO function's CIS tuples are parsed
 * @iv: invariants structure to fill (sprom + boardinfo)
 *
 * Walks the CIS tuple chain and decodes the vendor-specific tuples
 * (sromrev, id, boardrev, PA coefficients, country code, antenna data,
 * boardflags, LED GPIOs) plus the standard LAN-node-ID tuple carrying the
 * MAC address.  Returns 0 on success, -ENODEV when a tuple has an
 * unexpected size.
 */
int ssb_sdio_get_invariants(struct ssb_bus *bus,
			    struct ssb_init_invariants *iv)
{
	struct ssb_sprom *sprom = &iv->sprom;
	struct ssb_boardinfo *bi = &iv->boardinfo;
	const char *error_description = "none";
	struct sdio_func_tuple *tuple;
	void *mac;

	/* 0xFF marks fields as "not present"; boardflags default to 0. */
	memset(sprom, 0xFF, sizeof(*sprom));
	sprom->boardflags_lo = 0;
	sprom->boardflags_hi = 0;

	tuple = bus->host_sdio->tuples;
	while (tuple) {
		switch (tuple->code) {
		case 0x22: /* extended function */
			switch (tuple->data[0]) {
			case CISTPL_FUNCE_LAN_NODE_ID:
				/* NOTE(review): '&&' only rejects the tuple
				 * when BOTH the size and the length byte are
				 * wrong; '||' looks intended here — confirm
				 * before changing. */
				GOTO_ERROR_ON((tuple->size != 7) &&
					      (tuple->data[1] != 6),
					      "mac tpl size");
				/* fetch the MAC address. */
				mac = tuple->data + 2;
				memcpy(sprom->il0mac, mac, ETH_ALEN);
				memcpy(sprom->et1mac, mac, ETH_ALEN);
				break;
			default:
				break;
			}
			break;
		case 0x80: /* vendor specific tuple */
			switch (tuple->data[0]) {
			case SSB_SDIO_CIS_SROMREV:
				GOTO_ERROR_ON(tuple->size != 2,
					      "sromrev tpl size");
				sprom->revision = tuple->data[1];
				break;
			case SSB_SDIO_CIS_ID:
				GOTO_ERROR_ON((tuple->size != 5) &&
					      (tuple->size != 7),
					      "id tpl size");
				/* Little-endian 16 bit vendor ID. */
				bi->vendor = tuple->data[1] |
					     (tuple->data[2]<<8);
				break;
			case SSB_SDIO_CIS_BOARDREV:
				GOTO_ERROR_ON(tuple->size != 2,
					      "boardrev tpl size");
				sprom->board_rev = tuple->data[1];
				break;
			case SSB_SDIO_CIS_PA:
				GOTO_ERROR_ON((tuple->size != 9) &&
					      (tuple->size != 10),
					      "pa tpl size");
				/* Three LE16 PA coefficients, then shared
				 * idle-TSSI and max-power bytes for both
				 * the A and B/G bands. */
				sprom->pa0b0 = tuple->data[1] |
					 ((u16)tuple->data[2] << 8);
				sprom->pa0b1 = tuple->data[3] |
					 ((u16)tuple->data[4] << 8);
				sprom->pa0b2 = tuple->data[5] |
					 ((u16)tuple->data[6] << 8);
				sprom->itssi_a = tuple->data[7];
				sprom->itssi_bg = tuple->data[7];
				sprom->maxpwr_a = tuple->data[8];
				sprom->maxpwr_bg = tuple->data[8];
				break;
			case SSB_SDIO_CIS_OEMNAME:
				/* Not present */
				break;
			case SSB_SDIO_CIS_CCODE:
				GOTO_ERROR_ON(tuple->size != 2,
					      "ccode tpl size");
				sprom->country_code = tuple->data[1];
				break;
			case SSB_SDIO_CIS_ANTENNA:
				GOTO_ERROR_ON(tuple->size != 2,
					      "ant tpl size");
				sprom->ant_available_a = tuple->data[1];
				sprom->ant_available_bg = tuple->data[1];
				break;
			case SSB_SDIO_CIS_ANTGAIN:
				GOTO_ERROR_ON(tuple->size != 2,
					      "antg tpl size");
				/* One gain value applies to all antennas. */
				sprom->antenna_gain.a0 = tuple->data[1];
				sprom->antenna_gain.a1 = tuple->data[1];
				sprom->antenna_gain.a2 = tuple->data[1];
				sprom->antenna_gain.a3 = tuple->data[1];
				break;
			case SSB_SDIO_CIS_BFLAGS:
				GOTO_ERROR_ON((tuple->size != 3) &&
					      (tuple->size != 5),
					      "bfl tpl size");
				sprom->boardflags_lo = tuple->data[1] |
						 ((u16)tuple->data[2] << 8);
				break;
			case SSB_SDIO_CIS_LEDS:
				GOTO_ERROR_ON(tuple->size != 5,
					      "leds tpl size");
				sprom->gpio0 = tuple->data[1];
				sprom->gpio1 = tuple->data[2];
				sprom->gpio2 = tuple->data[3];
				sprom->gpio3 = tuple->data[4];
				break;
			default:
				break;
			}
			break;
		default:
			break;
		}
		tuple = tuple->next;
	}
	return 0;
error:
	dev_err(ssb_sdio_dev(bus), "failed to fetch device invariants: %s\n",
		error_description);
	return -ENODEV;
}
/* Tear down SDIO bus state; currently nothing is held. */
void ssb_sdio_exit(struct ssb_bus *bus)
{
	if (bus->bustype != SSB_BUSTYPE_SDIO)
		return;

	/* No SDIO-specific resources to release. */
}

/* Initialize an SDIO-attached bus: reset the cached backplane window
 * address to an invalid sentinel value.  No-op for other bus types. */
int ssb_sdio_init(struct ssb_bus *bus)
{
	if (bus->bustype == SSB_BUSTYPE_SDIO)
		bus->sdio_sbaddr = ~0;

	return 0;
}
| gpl-2.0 |
Dazzozo/huawei-kernel-3.4 | drivers/net/wimax/i2400m/sysfs.c | 14493 | 2248 | /*
* Intel Wireless WiMAX Connection 2400m
* Sysfs interfaces to show driver and device information
*
*
* Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include "i2400m.h"
#define D_SUBMODULE sysfs
#include "debug-levels.h"
/*
* Set the idle timeout (msecs)
*
* FIXME: eventually this should be a common WiMAX stack method, but
* would like to wait to see how other devices manage it.
*/
/*
 * Parse and apply an idle timeout (in msecs) written via sysfs.
 *
 * Accepts 0 (disabled) or any multiple of 100 in [100, 300000]; anything
 * else is rejected with -EINVAL.  On success the whole write is consumed.
 */
static
ssize_t i2400m_idle_timeout_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct i2400m *i2400m = net_dev_to_i2400m(to_net_dev(dev));
	ssize_t result;
	unsigned timeout;

	if (sscanf(buf, "%u\n", &timeout) != 1)
		return -EINVAL;

	if (timeout != 0
	    && (timeout < 100 || timeout > 300000 || timeout % 100 != 0)) {
		dev_err(dev, "idle_timeout: %u: invalid msecs specification; "
			"valid values are 0, 100-300000 in 100 increments\n",
			timeout);
		return -EINVAL;
	}

	result = i2400m_set_idle_timeout(i2400m, timeout);
	return result >= 0 ? (ssize_t)size : result;
}
/* Write-only (owner-writable) device attribute; no show method. */
static
DEVICE_ATTR(i2400m_idle_timeout, S_IWUSR,
	    NULL, i2400m_idle_timeout_store);

/* NULL-terminated list of attributes exported by this driver. */
static
struct attribute *i2400m_dev_attrs[] = {
	&dev_attr_i2400m_idle_timeout.attr,
	NULL,
};

/* Unnamed group so the attributes land in the device's own directory. */
struct attribute_group i2400m_dev_attr_group = {
	.name = NULL,		/* we want them in the same directory */
	.attrs = i2400m_dev_attrs,
};
| gpl-2.0 |
moonman/linux-imx6-3.14 | drivers/mmc/host/sdhci-of-arasan.c | 158 | 5971 | /*
* Arasan Secure Digital Host Controller Interface.
* Copyright (C) 2011 - 2012 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2012 Wind River Systems, Inc.
* Copyright (C) 2013 Pengutronix e.K.
* Copyright (C) 2013 Xilinx Inc.
*
* Based on sdhci-of-esdhc.c
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
*
* Authors: Xiaobo Xie <X.Xie@freescale.com>
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <linux/module.h>
#include "sdhci-pltfm.h"
/* Arasan clock control register and the 4-bit timeout divider field. */
#define SDHCI_ARASAN_CLK_CTRL_OFFSET	0x2c

#define CLK_CTRL_TIMEOUT_SHIFT		16
#define CLK_CTRL_TIMEOUT_MASK		(0xf << CLK_CTRL_TIMEOUT_SHIFT)
/* Minimum timeout divider exponent: timeout clk = SD clk / 2^(13+field). */
#define CLK_CTRL_TIMEOUT_MIN_EXP	13

/**
 * struct sdhci_arasan_data
 * @clk_ahb:	Pointer to the AHB clock
 */
struct sdhci_arasan_data {
	struct clk	*clk_ahb;
};
/*
 * Compute the data timeout clock rate.
 *
 * The controller derives it from the SD input clock divided by
 * 2^(CLK_CTRL_TIMEOUT_MIN_EXP + divider field) where the divider field is
 * read from the Arasan clock control register.
 */
static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	unsigned long rate = clk_get_rate(pltfm_host->clk);
	u32 field;

	field = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET);
	field = (field & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT;

	return rate >> (CLK_CTRL_TIMEOUT_MIN_EXP + field);
}
/* Host callbacks: generic SDHCI helpers plus the Arasan-specific timeout
 * clock calculation above. */
static struct sdhci_ops sdhci_arasan_ops = {
	.set_clock = sdhci_set_clock,
	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
	.get_timeout_clock = sdhci_arasan_get_timeout_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_set_uhs_signaling,
};

/* Platform data handed to sdhci_pltfm_init() in probe. */
static struct sdhci_pltfm_data sdhci_arasan_pdata = {
	.ops = &sdhci_arasan_ops,
};
#ifdef CONFIG_PM_SLEEP
/**
 * sdhci_arasan_suspend - Suspend method for the driver
 * @dev: Address of the device structure
 * Returns 0 on success and error value on error
 *
 * Put the device in a low power state.
 */
static int sdhci_arasan_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
	int ret;

	ret = sdhci_suspend_host(host);
	if (ret)
		return ret;

	/* Gate both clocks but keep them prepared, so resume can use the
	 * plain clk_enable() path. */
	clk_disable(pltfm_host->clk);
	clk_disable(sdhci_arasan->clk_ahb);

	return 0;
}

/**
 * sdhci_arasan_resume - Resume method for the driver
 * @dev: Address of the device structure
 * Returns 0 on success and error value on error
 *
 * Resume operation after suspend
 */
static int sdhci_arasan_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
	int ret;

	/* Bring the AHB clock up before the SD clock; back out on failure. */
	ret = clk_enable(sdhci_arasan->clk_ahb);
	if (ret) {
		dev_err(dev, "Cannot enable AHB clock.\n");
		return ret;
	}

	ret = clk_enable(pltfm_host->clk);
	if (ret) {
		dev_err(dev, "Cannot enable SD clock.\n");
		clk_disable(sdhci_arasan->clk_ahb);
		return ret;
	}

	return sdhci_resume_host(host);
}
#endif /* CONFIG_PM_SLEEP */

/* Dev PM ops; compiles to an empty set when CONFIG_PM_SLEEP is off. */
static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend,
			 sdhci_arasan_resume);
/*
 * sdhci_arasan_probe - bind the platform device
 *
 * Acquires and enables the AHB and SD (clk_xin) clocks, registers the
 * SDHCI platform host and wires the private data.  On any failure the
 * clocks are unwound in reverse order via the goto chain.
 *
 * Fix: 'ret' holds negative errno values here, so the two dev_err()
 * messages must use %d — the original "%u" printed them as huge unsigned
 * numbers.
 */
static int sdhci_arasan_probe(struct platform_device *pdev)
{
	int ret;
	struct clk *clk_xin;
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_arasan_data *sdhci_arasan;

	sdhci_arasan = devm_kzalloc(&pdev->dev, sizeof(*sdhci_arasan),
				    GFP_KERNEL);
	if (!sdhci_arasan)
		return -ENOMEM;

	sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
	if (IS_ERR(sdhci_arasan->clk_ahb)) {
		dev_err(&pdev->dev, "clk_ahb clock not found.\n");
		return PTR_ERR(sdhci_arasan->clk_ahb);
	}

	clk_xin = devm_clk_get(&pdev->dev, "clk_xin");
	if (IS_ERR(clk_xin)) {
		dev_err(&pdev->dev, "clk_xin clock not found.\n");
		return PTR_ERR(clk_xin);
	}

	ret = clk_prepare_enable(sdhci_arasan->clk_ahb);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AHB clock.\n");
		return ret;
	}

	ret = clk_prepare_enable(clk_xin);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable SD clock.\n");
		goto clk_dis_ahb;
	}

	host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0);
	if (IS_ERR(host)) {
		ret = PTR_ERR(host);
		dev_err(&pdev->dev, "platform init failed (%d)\n", ret);
		goto clk_disable_all;
	}

	sdhci_get_of_property(pdev);
	pltfm_host = sdhci_priv(host);
	pltfm_host->priv = sdhci_arasan;
	pltfm_host->clk = clk_xin;

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(&pdev->dev, "platform register failed (%d)\n", ret);
		goto err_pltfm_free;
	}

	return 0;

err_pltfm_free:
	sdhci_pltfm_free(pdev);
clk_disable_all:
	clk_disable_unprepare(clk_xin);
clk_dis_ahb:
	clk_disable_unprepare(sdhci_arasan->clk_ahb);

	return ret;
}
/*
 * sdhci_arasan_remove - unbind the platform device
 *
 * Fix: unregister the host *before* switching off its clocks.
 * sdhci_pltfm_unregister() still accesses controller registers while
 * removing the host and then frees the sdhci_host structure, so (a) the
 * clocks must still be running when it executes and (b) the clk_xin
 * handle has to be saved up front because pltfm_host lives inside the
 * freed host structure.  sdhci_arasan itself is devm-allocated and stays
 * valid for the duration of remove.
 */
static int sdhci_arasan_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
	struct clk *clk_xin = pltfm_host->clk;
	int ret;

	ret = sdhci_pltfm_unregister(pdev);

	clk_disable_unprepare(clk_xin);
	clk_disable_unprepare(sdhci_arasan->clk_ahb);

	return ret;
}
/* Devicetree match table: Arasan SDHCI 8.9a IP. */
static const struct of_device_id sdhci_arasan_of_match[] = {
	{ .compatible = "arasan,sdhci-8.9a" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match);

static struct platform_driver sdhci_arasan_driver = {
	.driver = {
		.name = "sdhci-arasan",
		.owner = THIS_MODULE,
		.of_match_table = sdhci_arasan_of_match,
		.pm = &sdhci_arasan_dev_pm_ops,
	},
	.probe = sdhci_arasan_probe,
	.remove = sdhci_arasan_remove,
};

module_platform_driver(sdhci_arasan_driver);

MODULE_DESCRIPTION("Driver for the Arasan SDHCI Controller");
MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
santod/KK_sense_kernel_htc_m7vzw | arch/arm/mach-msm/board-m7-rfkill.c | 158 | 8204 | /*
* Copyright (C) 2009 Google, Inc.
* Copyright (C) 2009-2011 HTC Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <linux/mfd/pm8xxx/pm8921.h>
#include <mach/htc_4335_wl_reg.h>
#include "board-m7.h"
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#define BTLOCK_NAME     "btlock"
#define BTLOCK_MINOR    MISC_DYNAMIC_MINOR
/* Maximum time (seconds) a lock acquisition may block. */
#define BTLOCK_TIMEOUT	2
/* Prefixed printk helper used throughout this file. */
#define PR(msg, ...) printk("####"msg, ##__VA_ARGS__)

/* Request record userspace writes to /dev/btlock. */
struct btlock {
	int lock;	/* 0 = release, 1 or 2 = acquire (see btlock_write) */
	int cookie;	/* four ASCII bytes identifying the caller */
};

/* Semaphore arbitrating the shared BT/WLAN resource.  count and
 * owner_cookie are diagnostic bookkeeping, updated without extra
 * locking. */
static struct semaphore btlock;
static int count = 1;
static int owner_cookie = -1;
/*
 * Acquire the BT/WLAN arbitration lock.
 *
 * Blocks for at most BTLOCK_TIMEOUT seconds.  On success the caller's
 * cookie is recorded as the owner and 0 is returned; otherwise the
 * (negative) down_timeout() result is returned.
 */
int bcm_bt_lock(int cookie)
{
	char tag[5] = {0};
	int rc;

	rc = down_timeout(&btlock, msecs_to_jiffies(BTLOCK_TIMEOUT * 1000));
	if (rc)
		return rc;

	/* Cookie is four ASCII bytes; render it for the log. */
	memcpy(tag, &cookie, sizeof(cookie));
	owner_cookie = cookie;
	count--;
	PR("btlock acquired cookie: %s\n", tag);

	return rc;
}
/*
 * Release the BT/WLAN arbitration lock if @cookie matches the recorded
 * owner; a mismatched cookie is logged and ignored so a stale caller
 * cannot release someone else's lock.  The count check flags (best
 * effort, no locking) a release without a matching acquire.
 */
void bcm_bt_unlock(int cookie)
{
	char owner_msg[5] = {0};
	char cookie_msg[5] = {0};

	/* Cookies are four ASCII bytes; render them as strings. */
	memcpy(cookie_msg, &cookie, sizeof(cookie));
	if (owner_cookie == cookie) {
		owner_cookie = -1;
		if (count++ > 1)
			PR("error, release a lock that was not acquired**\n");
		up(&btlock);
		PR("btlock released, cookie: %s\n", cookie_msg);
	} else {
		memcpy(owner_msg, &owner_cookie, sizeof(owner_cookie));
		PR("ignore lock release, cookie mismatch: %s owner %s \n", cookie_msg,
				owner_cookie == 0 ? "NULL" : owner_msg);
	}
}
/* open(): no per-file state to set up. */
static int btlock_open(struct inode *inode, struct file *file)
{
	return 0;
}

/* release(): nothing to tear down. */
static int btlock_release(struct inode *inode, struct file *file)
{
	return 0;
}
/*
 * write() handler for /dev/btlock.
 *
 * Userspace passes a struct btlock { lock, cookie }: lock == 0 releases,
 * lock == 1 acquires.  Returns 0 or the acquisition result.
 * NOTE(review): lock == 2 is handled identically to lock == 1 — a
 * distinct semantic (e.g. try-lock) may have been intended; confirm with
 * the userspace counterpart before changing.
 */
static ssize_t btlock_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct btlock lock_para;
	ssize_t ret = 0;

	if (count < sizeof(struct btlock))
		return -EINVAL;

	if (copy_from_user(&lock_para, buffer, sizeof(struct btlock))) {
		return -EFAULT;
	}

	if (lock_para.lock == 0) {
		bcm_bt_unlock(lock_para.cookie);
	} else if (lock_para.lock == 1) {
		ret = bcm_bt_lock(lock_para.cookie);
	} else if (lock_para.lock == 2) {
		ret = bcm_bt_lock(lock_para.cookie);
	}

	return ret;
}
/* File operations for /dev/btlock; only write() carries semantics. */
static const struct file_operations btlock_fops = {
	.owner = THIS_MODULE,
	.open = btlock_open,
	.release = btlock_release,
	.write = btlock_write,
};

/* Dynamic-minor misc device exposing the lock to userspace. */
static struct miscdevice btlock_misc = {
	.name = BTLOCK_NAME,
	.minor = BTLOCK_MINOR,
	.fops = &btlock_fops,
};
/*
 * Register /dev/btlock.
 *
 * Fix: the semaphore is initialised *before* misc_register().  As soon
 * as misc_register() returns, userspace can open the node and write to
 * it, and the write path takes the semaphore — in the original ordering
 * that could happen against an uninitialised semaphore.
 */
static int bcm_btlock_init(void)
{
	int ret;

	PR("init\n");

	sema_init(&btlock, 1);

	ret = misc_register(&btlock_misc);
	if (ret != 0) {
		PR("Error: failed to register Misc driver, ret = %d\n", ret);
		return ret;
	}

	return ret;
}
/* Unregister /dev/btlock. */
static void bcm_btlock_exit(void)
{
	PR("btlock_exit:\n");

	misc_deregister(&btlock_misc);
}

/* rfkill switch created in probe, and its reported device name. */
static struct rfkill *bt_rfk;
static const char bt_name[] = "bcm4334";

/* Board revision, provided by the platform setup code. */
extern unsigned int system_rev;
/* Pairs a system-numbered PMIC GPIO with its pm_gpio configuration. */
struct pm8xxx_gpio_init {
	unsigned gpio;
	struct pm_gpio config;
};

/* Convenience initializer: translates the PMIC-local GPIO number into
 * the system GPIO number space and fills in the full pm_gpio config. */
#define PM8XXX_GPIO_INIT(_gpio, _dir, _buf, _val, _pull, _vin, _out_strength, \
			_func, _inv, _disable) \
{ \
	.gpio = PM8921_GPIO_PM_TO_SYS(_gpio), \
	.config = { \
		.direction = _dir, \
		.output_buffer = _buf, \
		.output_value = _val, \
		.pull = _pull, \
		.vin_sel = _vin, \
		.out_strength = _out_strength, \
		.function = _func, \
		.inv_int_pol = _inv, \
		.disable_pin = _disable, \
	} \
}

/* BT control lines on the PMIC: BT_REG_ON and BT_WAKE as low-drive CMOS
 * outputs (initial value 0), BT_HOST_WAKE as a pulled-down input. */
struct pm8xxx_gpio_init m7_bt_pmic_gpio[] = {
	PM8XXX_GPIO_INIT(BT_REG_ON, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, 0, \
			PM_GPIO_PULL_NO, PM_GPIO_VIN_S4, \
			PM_GPIO_STRENGTH_LOW, \
			PM_GPIO_FUNC_NORMAL, 0, 0),
	PM8XXX_GPIO_INIT(BT_WAKE, PM_GPIO_DIR_OUT, PM_GPIO_OUT_BUF_CMOS, 0, \
			PM_GPIO_PULL_NO, PM_GPIO_VIN_S4, \
			PM_GPIO_STRENGTH_LOW, \
			PM_GPIO_FUNC_NORMAL, 0, 0),
	PM8XXX_GPIO_INIT(BT_HOST_WAKE, PM_GPIO_DIR_IN, PM_GPIO_OUT_BUF_CMOS, 0, \
			PM_GPIO_PULL_DN, PM_GPIO_VIN_S4, \
			PM_GPIO_STRENGTH_NO, \
			PM_GPIO_FUNC_NORMAL, 0, 0),
};

/* UART pad config while BT is powered: pin function 2, TX/RTS driven,
 * RX/CTS pulled up. */
static uint32_t m7_GPIO_bt_on_table[] = {
	GPIO_CFG(BT_UART_RTSz,
		2,
		GPIO_CFG_OUTPUT,
		GPIO_CFG_NO_PULL,
		GPIO_CFG_4MA),
	GPIO_CFG(BT_UART_CTSz,
		2,
		GPIO_CFG_INPUT,
		GPIO_CFG_PULL_UP,
		GPIO_CFG_4MA),
	GPIO_CFG(BT_UART_RX,
		2,
		GPIO_CFG_INPUT,
		GPIO_CFG_PULL_UP,
		GPIO_CFG_4MA),
	GPIO_CFG(BT_UART_TX,
		2,
		GPIO_CFG_OUTPUT,
		GPIO_CFG_NO_PULL,
		GPIO_CFG_4MA),
};

/* UART pad config while BT is off: all four pins parked as pulled-down
 * GPIO inputs. */
static uint32_t m7_GPIO_bt_off_table[] = {
	GPIO_CFG(BT_UART_RTSz,
		0,
		GPIO_CFG_INPUT,
		GPIO_CFG_PULL_DOWN,
		GPIO_CFG_4MA),
	GPIO_CFG(BT_UART_CTSz,
		0,
		GPIO_CFG_INPUT,
		GPIO_CFG_PULL_DOWN,
		GPIO_CFG_4MA),
	GPIO_CFG(BT_UART_RX,
		0,
		GPIO_CFG_INPUT,
		GPIO_CFG_PULL_DOWN,
		GPIO_CFG_4MA),
	GPIO_CFG(BT_UART_TX,
		0,
		GPIO_CFG_INPUT,
		GPIO_CFG_PULL_DOWN,
		GPIO_CFG_4MA),
};
/*
 * Apply a table of gpio_tlmm_config() settings.
 *
 * Stops at (and logs) the first entry that fails to apply.
 */
static void config_bt_table(uint32_t *table, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		int rc = gpio_tlmm_config(table[i], GPIO_CFG_ENABLE);

		if (rc) {
			pr_err("[BT]%s: gpio_tlmm_config(%#x)=%d\n",
				__func__, table[i], rc);
			break;
		}
	}
}
/* Power-up sequence: route the UART pads to the BT function, then pulse
 * BT_REG_ON low -> high with the delays the hardware requires. */
static void m7_GPIO_config_bt_on(void)
{
	printk(KERN_INFO "[BT]== R ON ==\n");

	config_bt_table(m7_GPIO_bt_on_table,
				ARRAY_SIZE(m7_GPIO_bt_on_table));
	mdelay(2);

	/* Pre-XC boards gate the chip through the shared WL regulator. */
	if (system_rev < XC) {
		printk(KERN_INFO "[BT]XA XB\n");
		htc_BCM4335_wl_reg_ctl(BCM4335_WL_REG_ON, ID_BT);
		mdelay(5);
	}

	gpio_set_value(PM8921_GPIO_PM_TO_SYS(BT_REG_ON), 0);
	mdelay(5);
	gpio_set_value(PM8921_GPIO_PM_TO_SYS(BT_WAKE), 0);
	mdelay(5);
	gpio_set_value(PM8921_GPIO_PM_TO_SYS(BT_REG_ON), 1);
	mdelay(1);
}

/* Power-down sequence: drop the shared regulator (old boards), deassert
 * BT_REG_ON and park the UART pads as pulled-down inputs. */
static void m7_GPIO_config_bt_off(void)
{
	if (system_rev < XC) {
		htc_BCM4335_wl_reg_ctl(BCM4335_WL_REG_OFF, ID_BT);
		mdelay(5);
	}

	gpio_set_value(PM8921_GPIO_PM_TO_SYS(BT_REG_ON), 0);
	mdelay(1);

	config_bt_table(m7_GPIO_bt_off_table,
				ARRAY_SIZE(m7_GPIO_bt_off_table));
	mdelay(2);

	gpio_set_value(PM8921_GPIO_PM_TO_SYS(BT_WAKE), 0);

	printk(KERN_INFO "[BT]== R OFF ==\n");
}
/* rfkill set_block callback: blocked == true means radio off. */
static int bluetooth_set_power(void *data, bool blocked)
{
	if (!blocked)
		m7_GPIO_config_bt_on();
	else
		m7_GPIO_config_bt_off();

	return 0;
}

static struct rfkill_ops m7_rfkill_ops = {
	.set_block = bluetooth_set_power,
};
/* Probe: configure the BT PMIC GPIOs, register /dev/btlock, power the
 * radio down (default_state == true == blocked) and register the rfkill
 * switch. */
static int m7_rfkill_probe(struct platform_device *pdev)
{
	int rc = 0;
	bool default_state = true;	/* true == blocked == radio off */
	int i = 0;

	mdelay(2);

	/* GPIO config failures are logged but not fatal. */
	for (i = 0; i < ARRAY_SIZE(m7_bt_pmic_gpio); i++) {
		rc = pm8xxx_gpio_config(m7_bt_pmic_gpio[i].gpio,
					&m7_bt_pmic_gpio[i].config);
		if (rc)
			pr_info("[bt] %s: Config ERROR: GPIO=%u, rc=%d\n",
					__func__, m7_bt_pmic_gpio[i].gpio, rc);
	}

	bcm_btlock_init();

	bluetooth_set_power(NULL, default_state);

	bt_rfk = rfkill_alloc(bt_name, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
				&m7_rfkill_ops, NULL);
	if (!bt_rfk) {
		rc = -ENOMEM;
		goto err_rfkill_alloc;
	}

	/* Report the initial (blocked) state to the rfkill core. */
	rfkill_set_states(bt_rfk, default_state, false);

	rc = rfkill_register(bt_rfk);
	if (rc)
		goto err_rfkill_reg;

	return 0;

err_rfkill_reg:
	rfkill_destroy(bt_rfk);
err_rfkill_alloc:
	return rc;
}
/* Teardown in reverse probe order: rfkill first, then /dev/btlock. */
static int m7_rfkill_remove(struct platform_device *dev)
{
	rfkill_unregister(bt_rfk);
	rfkill_destroy(bt_rfk);
	bcm_btlock_exit();

	return 0;
}
static struct platform_driver m7_rfkill_driver = {
	.probe = m7_rfkill_probe,
	.remove = m7_rfkill_remove,
	.driver = {
		.name = "m7_rfkill",
		.owner = THIS_MODULE,
	},
};

/* Module init/exit: plain platform driver (un)registration. */
static int __init m7_rfkill_init(void)
{
	return platform_driver_register(&m7_rfkill_driver);
}

static void __exit m7_rfkill_exit(void)
{
	platform_driver_unregister(&m7_rfkill_driver);
}

module_init(m7_rfkill_init);
module_exit(m7_rfkill_exit);
MODULE_DESCRIPTION("m7 rfkill");
MODULE_AUTHOR("htc_ssdbt <htc_ssdbt@htc.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
penhoi/linux-3.13.11.lbrpmu | drivers/memstick/host/jmb38x_ms.c | 1438 | 26160 | /*
* jmb38x_ms.c - JMicron jmb38x MemoryStick card reader
*
* Copyright (C) 2008 Alex Dubov <oakad@yahoo.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memstick.h>
#include <linux/slab.h>
#include <linux/module.h>
#define DRIVER_NAME "jmb38x_ms"
static bool no_dma;
module_param(no_dma, bool, 0644);
enum {
DMA_ADDRESS = 0x00,
BLOCK = 0x04,
DMA_CONTROL = 0x08,
TPC_P0 = 0x0c,
TPC_P1 = 0x10,
TPC = 0x14,
HOST_CONTROL = 0x18,
DATA = 0x1c,
STATUS = 0x20,
INT_STATUS = 0x24,
INT_STATUS_ENABLE = 0x28,
INT_SIGNAL_ENABLE = 0x2c,
TIMER = 0x30,
TIMER_CONTROL = 0x34,
PAD_OUTPUT_ENABLE = 0x38,
PAD_PU_PD = 0x3c,
CLOCK_DELAY = 0x40,
ADMA_ADDRESS = 0x44,
CLOCK_CONTROL = 0x48,
LED_CONTROL = 0x4c,
VERSION = 0x50
};
/* Per-socket host state; one instance per Memory Stick socket. */
struct jmb38x_ms_host {
	struct jmb38x_ms        *chip;		/* owning PCI device state */
	void __iomem            *addr;		/* mapped register window */
	spinlock_t              lock;		/* guards req and I/O state */
	struct tasklet_struct   notify;		/* request-dispatch tasklet */
	int                     id;
	char                    host_id[32];
	int                     irq;
	unsigned int            block_pos;	/* bytes of request completed */
	unsigned long           timeout_jiffies;
	struct timer_list       timer;		/* per-command watchdog */
	struct memstick_request *req;		/* request in flight, or NULL */
	unsigned char           cmd_flags;	/* CMD_READY/FIFO_READY/... */
	unsigned char           io_pos;		/* valid bytes in io_word */
	unsigned char           ifmode;		/* serial vs parallel mode */
	unsigned int            io_word[2];	/* PIO staging words */
};

/* Per-PCI-device state: a variable number of socket hosts. */
struct jmb38x_ms {
	struct pci_dev        *pdev;
	int                   host_cnt;
	struct memstick_host  *hosts[];
};
#define BLOCK_COUNT_MASK 0xffff0000
#define BLOCK_SIZE_MASK 0x00000fff
#define DMA_CONTROL_ENABLE 0x00000001
#define TPC_DATA_SEL 0x00008000
#define TPC_DIR 0x00004000
#define TPC_WAIT_INT 0x00002000
#define TPC_GET_INT 0x00000800
#define TPC_CODE_SZ_MASK 0x00000700
#define TPC_DATA_SZ_MASK 0x00000007
#define HOST_CONTROL_TDELAY_EN 0x00040000
#define HOST_CONTROL_HW_OC_P 0x00010000
#define HOST_CONTROL_RESET_REQ 0x00008000
#define HOST_CONTROL_REI 0x00004000
#define HOST_CONTROL_LED 0x00000400
#define HOST_CONTROL_FAST_CLK 0x00000200
#define HOST_CONTROL_RESET 0x00000100
#define HOST_CONTROL_POWER_EN 0x00000080
#define HOST_CONTROL_CLOCK_EN 0x00000040
#define HOST_CONTROL_REO 0x00000008
#define HOST_CONTROL_IF_SHIFT 4
#define HOST_CONTROL_IF_SERIAL 0x0
#define HOST_CONTROL_IF_PAR4 0x1
#define HOST_CONTROL_IF_PAR8 0x3
#define STATUS_BUSY 0x00080000
#define STATUS_MS_DAT7 0x00040000
#define STATUS_MS_DAT6 0x00020000
#define STATUS_MS_DAT5 0x00010000
#define STATUS_MS_DAT4 0x00008000
#define STATUS_MS_DAT3 0x00004000
#define STATUS_MS_DAT2 0x00002000
#define STATUS_MS_DAT1 0x00001000
#define STATUS_MS_DAT0 0x00000800
#define STATUS_HAS_MEDIA 0x00000400
#define STATUS_FIFO_EMPTY 0x00000200
#define STATUS_FIFO_FULL 0x00000100
#define STATUS_MS_CED 0x00000080
#define STATUS_MS_ERR 0x00000040
#define STATUS_MS_BRQ 0x00000020
#define STATUS_MS_CNK 0x00000001
#define INT_STATUS_TPC_ERR 0x00080000
#define INT_STATUS_CRC_ERR 0x00040000
#define INT_STATUS_TIMER_TO 0x00020000
#define INT_STATUS_HSK_TO 0x00010000
#define INT_STATUS_ANY_ERR 0x00008000
#define INT_STATUS_FIFO_WRDY 0x00000080
#define INT_STATUS_FIFO_RRDY 0x00000040
#define INT_STATUS_MEDIA_OUT 0x00000010
#define INT_STATUS_MEDIA_IN 0x00000008
#define INT_STATUS_DMA_BOUNDARY 0x00000004
#define INT_STATUS_EOTRAN 0x00000002
#define INT_STATUS_EOTPC 0x00000001
#define INT_STATUS_ALL 0x000f801f
#define PAD_OUTPUT_ENABLE_MS 0x0F3F
#define PAD_PU_PD_OFF 0x7FFF0000
#define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000
#define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000
#define CLOCK_CONTROL_BY_MMIO 0x00000008
#define CLOCK_CONTROL_40MHZ 0x00000001
#define CLOCK_CONTROL_50MHZ 0x00000002
#define CLOCK_CONTROL_60MHZ 0x00000010
#define CLOCK_CONTROL_62_5MHZ 0x00000004
#define CLOCK_CONTROL_OFF 0x00000000
#define PCI_CTL_CLOCK_DLY_ADDR 0x000000b0
enum {
CMD_READY = 0x01,
FIFO_READY = 0x02,
REG_DATA = 0x04,
DMA_DATA = 0x08
};
/*
 * Drain read data from the controller FIFO into @buf.
 *
 * The FIFO is 32 bits wide; a partially consumed word is cached in
 * host->io_word[0] with host->io_pos counting the valid bytes still in
 * it.  Returns the number of bytes stored into @buf.
 *
 * Fix: the original tail loop (for (io_pos = 4; io_pos; --io_pos))
 * skipped the io_pos decrement when it broke out on length exhaustion,
 * leaving io_pos one higher than the bytes actually remaining and making
 * a later partial read replay a stale/zero byte.  The tail now mirrors
 * the head loop so io_pos always matches the bytes left in io_word[0].
 */
static unsigned int jmb38x_ms_read_data(struct jmb38x_ms_host *host,
					unsigned char *buf, unsigned int length)
{
	unsigned int off = 0;

	/* First serve bytes left over from a previously read word. */
	while (host->io_pos && length) {
		buf[off++] = host->io_word[0] & 0xff;
		host->io_word[0] >>= 8;
		length--;
		host->io_pos--;
	}

	if (!length)
		return off;

	/* Bulk phase: copy whole 32 bit words while the FIFO has data. */
	while (!(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) {
		if (length < 4)
			break;
		*(unsigned int *)(buf + off) = __raw_readl(host->addr + DATA);
		length -= 4;
		off += 4;
	}

	/* Tail: pull one more word, consume only the bytes needed and
	 * cache the rest in io_word[0]/io_pos for the next call. */
	if (length
	    && !(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) {
		host->io_word[0] = readl(host->addr + DATA);
		host->io_pos = 4;
		while (host->io_pos && length) {
			buf[off++] = host->io_word[0] & 0xff;
			host->io_word[0] >>= 8;
			host->io_pos--;
			length--;
		}
	}

	return off;
}
/*
 * Copy a register-mode (REG_DATA) reply out of the cached TPC register
 * pair into @buf.  The eight reply bytes were latched into io_word[0]
 * and io_word[1] by the interrupt handler, with io_pos counting the
 * bytes still unconsumed (8 down to 0).  Returns bytes copied.
 */
static unsigned int jmb38x_ms_read_reg_data(struct jmb38x_ms_host *host,
					    unsigned char *buf,
					    unsigned int length)
{
	unsigned int copied = 0;

	/* Bytes 8..5 live in io_word[0]. */
	for (; length && host->io_pos > 4; length--, host->io_pos--) {
		buf[copied++] = host->io_word[0] & 0xff;
		host->io_word[0] >>= 8;
	}

	if (!length)
		return copied;

	/* The remaining bytes come from io_word[1]. */
	for (; length && host->io_pos; length--, host->io_pos--) {
		buf[copied++] = host->io_word[1] & 0xff;
		host->io_word[1] >>= 8;
	}

	return copied;
}
/*
 * Feed write data from @buf into the controller FIFO.
 *
 * Partial words are staged in host->io_word[0] with host->io_pos
 * counting the bytes collected so far; a complete word is flushed once
 * the FIFO has room.  Returns the number of bytes consumed from @buf.
 */
static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host,
					 unsigned char *buf,
					 unsigned int length)
{
	unsigned int off = 0;

	/* Top up a previously started partial word first. */
	if (host->io_pos) {
		while (host->io_pos < 4 && length) {
			host->io_word[0] |= buf[off++] << (host->io_pos * 8);
			host->io_pos++;
			length--;
		}
	}

	if (host->io_pos == 4
	    && !(STATUS_FIFO_FULL & readl(host->addr + STATUS))) {
		writel(host->io_word[0], host->addr + DATA);
		host->io_pos = 0;
		host->io_word[0] = 0;
	} else if (host->io_pos) {
		/* FIFO full: keep the staged word for the next call. */
		return off;
	}

	if (!length)
		return off;

	/* Bulk phase: whole 32 bit words straight into the FIFO. */
	while (!(STATUS_FIFO_FULL & readl(host->addr + STATUS))) {
		if (length < 4)
			break;

		__raw_writel(*(unsigned int *)(buf + off),
			     host->addr + DATA);
		length -= 4;
		off += 4;
	}

	/* Stage the trailing 1-3 bytes; the cases intentionally fall
	 * through, assembling the word from the highest byte down. */
	switch (length) {
	case 3:
		host->io_word[0] |= buf[off + 2] << 16;
		host->io_pos++;
		/* fall through */
	case 2:
		host->io_word[0] |= buf[off + 1] << 8;
		host->io_pos++;
		/* fall through */
	case 1:
		host->io_word[0] |= buf[off];
		host->io_pos++;
	}

	off += host->io_pos;

	return off;
}
/*
 * Stage register-mode (REG_DATA) write bytes into the io_word pair that
 * will later be written to TPC_P0/TPC_P1.  io_pos counts the bytes
 * staged so far (0..8).  Returns the number of bytes consumed.
 *
 * Fix: for bytes 4..7 the original shifted the 32-bit io_word[1] by
 * io_pos * 8 = 32..56 bits.  Shifting a 32-bit value by >= 32 is
 * undefined behavior in C; it happened to work on x86 (shift counts are
 * taken mod 32 in hardware) but would lose the bytes on other
 * architectures.  The shift is now taken modulo one word
 * ((io_pos & 3) * 8), which is exactly what the x86 behavior evaluated
 * to, so observed behavior is unchanged while the UB is gone.
 */
static unsigned int jmb38x_ms_write_reg_data(struct jmb38x_ms_host *host,
					     unsigned char *buf,
					     unsigned int length)
{
	unsigned int off = 0;

	/* Bytes 0..3 of the register image accumulate in io_word[0]. */
	while (host->io_pos < 4 && length) {
		host->io_word[0] &= ~(0xff << (host->io_pos * 8));
		host->io_word[0] |= buf[off++] << (host->io_pos * 8);
		host->io_pos++;
		length--;
	}

	if (!length)
		return off;

	/* Bytes 4..7 accumulate in io_word[1]. */
	while (host->io_pos < 8 && length) {
		unsigned int shift = (host->io_pos & 3) * 8;

		host->io_word[1] &= ~(0xff << shift);
		host->io_word[1] |= buf[off++] << shift;
		host->io_pos++;
		length--;
	}

	return off;
}
static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
{
unsigned int length;
unsigned int off;
unsigned int t_size, p_cnt;
unsigned char *buf;
struct page *pg;
unsigned long flags = 0;
if (host->req->long_data) {
length = host->req->sg.length - host->block_pos;
off = host->req->sg.offset + host->block_pos;
} else {
length = host->req->data_len - host->block_pos;
off = 0;
}
while (length) {
unsigned int uninitialized_var(p_off);
if (host->req->long_data) {
pg = nth_page(sg_page(&host->req->sg),
off >> PAGE_SHIFT);
p_off = offset_in_page(off);
p_cnt = PAGE_SIZE - p_off;
p_cnt = min(p_cnt, length);
local_irq_save(flags);
buf = kmap_atomic(pg) + p_off;
} else {
buf = host->req->data + host->block_pos;
p_cnt = host->req->data_len - host->block_pos;
}
if (host->req->data_dir == WRITE)
t_size = !(host->cmd_flags & REG_DATA)
? jmb38x_ms_write_data(host, buf, p_cnt)
: jmb38x_ms_write_reg_data(host, buf, p_cnt);
else
t_size = !(host->cmd_flags & REG_DATA)
? jmb38x_ms_read_data(host, buf, p_cnt)
: jmb38x_ms_read_reg_data(host, buf, p_cnt);
if (host->req->long_data) {
kunmap_atomic(buf - p_off);
local_irq_restore(flags);
}
if (!t_size)
break;
host->block_pos += t_size;
length -= t_size;
off += t_size;
}
if (!length && host->req->data_dir == WRITE) {
if (host->cmd_flags & REG_DATA) {
writel(host->io_word[0], host->addr + TPC_P0);
writel(host->io_word[1], host->addr + TPC_P1);
} else if (host->io_pos) {
writel(host->io_word[0], host->addr + DATA);
}
}
return length;
}
/*
 * Program and start one TPC on the controller.
 *
 * Chooses between three transfer mechanisms:
 *  - register mode (REG_DATA) for payloads of at most 8 bytes,
 *  - DMA for scatter/gather requests (unless the no_dma module param),
 *  - interrupt-driven PIO through the FIFO otherwise.
 * Returns 0 once the TPC is written to the hardware, otherwise the error
 * also recorded in host->req->error.  Caller holds host->lock.
 */
static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
{
	struct jmb38x_ms_host *host = memstick_priv(msh);
	unsigned char *data;
	unsigned int data_len, cmd, t_val;

	if (!(STATUS_HAS_MEDIA & readl(host->addr + STATUS))) {
		dev_dbg(&msh->dev, "no media status\n");
		host->req->error = -ETIME;
		return host->req->error;
	}

	dev_dbg(&msh->dev, "control %08x\n", readl(host->addr + HOST_CONTROL));
	dev_dbg(&msh->dev, "status %08x\n", readl(host->addr + INT_STATUS));
	dev_dbg(&msh->dev, "hstatus %08x\n", readl(host->addr + STATUS));

	/* Reset all per-command state. */
	host->cmd_flags = 0;
	host->block_pos = 0;
	host->io_pos = 0;
	host->io_word[0] = 0;
	host->io_word[1] = 0;

	cmd = host->req->tpc << 16;
	cmd |= TPC_DATA_SEL;

	if (host->req->data_dir == READ)
		cmd |= TPC_DIR;

	if (host->req->need_card_int) {
		/* Serial mode fetches INT with a TPC; parallel waits in hw. */
		if (host->ifmode == MEMSTICK_SERIAL)
			cmd |= TPC_GET_INT;
		else
			cmd |= TPC_WAIT_INT;
	}

	data = host->req->data;

	if (!no_dma)
		host->cmd_flags |= DMA_DATA;

	if (host->req->long_data) {
		data_len = host->req->sg.length;
	} else {
		data_len = host->req->data_len;
		host->cmd_flags &= ~DMA_DATA;
	}

	/* Short payloads go through the TPC parameter registers. */
	if (data_len <= 8) {
		cmd &= ~(TPC_DATA_SEL | 0xf);
		host->cmd_flags |= REG_DATA;
		cmd |= data_len & 0xf;
		host->cmd_flags &= ~DMA_DATA;
	}

	if (host->cmd_flags & DMA_DATA) {
		if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1,
				    host->req->data_dir == READ
				    ? PCI_DMA_FROMDEVICE
				    : PCI_DMA_TODEVICE)) {
			host->req->error = -ENOMEM;
			return host->req->error;
		}
		data_len = sg_dma_len(&host->req->sg);
		writel(sg_dma_address(&host->req->sg),
		       host->addr + DMA_ADDRESS);
		writel(((1 << 16) & BLOCK_COUNT_MASK)
		       | (data_len & BLOCK_SIZE_MASK),
		       host->addr + BLOCK);
		writel(DMA_CONTROL_ENABLE, host->addr + DMA_CONTROL);
	} else if (!(host->cmd_flags & REG_DATA)) {
		/* PIO: arm the FIFO-ready interrupt for our direction. */
		writel(((1 << 16) & BLOCK_COUNT_MASK)
		       | (data_len & BLOCK_SIZE_MASK),
		       host->addr + BLOCK);
		t_val = readl(host->addr + INT_STATUS_ENABLE);
		t_val |= host->req->data_dir == READ
			 ? INT_STATUS_FIFO_RRDY
			 : INT_STATUS_FIFO_WRDY;

		writel(t_val, host->addr + INT_STATUS_ENABLE);
		writel(t_val, host->addr + INT_SIGNAL_ENABLE);
	} else {
		cmd &= ~(TPC_DATA_SEL | 0xf);
		host->cmd_flags |= REG_DATA;
		cmd |= data_len & 0xf;

		if (host->req->data_dir == WRITE) {
			/* Stage the outgoing bytes into TPC_P0/P1 now. */
			jmb38x_ms_transfer_data(host);
			writel(host->io_word[0], host->addr + TPC_P0);
			writel(host->io_word[1], host->addr + TPC_P1);
		}
	}

	/* Arm the software watchdog before kicking off the TPC. */
	mod_timer(&host->timer, jiffies + host->timeout_jiffies);
	writel(HOST_CONTROL_LED | readl(host->addr + HOST_CONTROL),
	       host->addr + HOST_CONTROL);
	host->req->error = 0;

	writel(cmd, host->addr + TPC);
	dev_dbg(&msh->dev, "executing TPC %08x, len %x\n", cmd, data_len);

	return 0;
}
/*
 * Finish the current request: stop the watchdog, capture the card's INT
 * register, undo DMA/PIO setup and either start the next queued request
 * or, when @last is set (teardown path), drain the queue with -ETIME.
 * Caller holds host->lock.
 */
static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last)
{
	struct jmb38x_ms_host *host = memstick_priv(msh);
	unsigned int t_val = 0;
	int rc;

	del_timer(&host->timer);

	dev_dbg(&msh->dev, "c control %08x\n",
		readl(host->addr + HOST_CONTROL));
	dev_dbg(&msh->dev, "c status %08x\n",
		readl(host->addr + INT_STATUS));
	dev_dbg(&msh->dev, "c hstatus %08x\n", readl(host->addr + STATUS));

	/* The low byte of STATUS carries the card's INT register. */
	host->req->int_reg = readl(host->addr + STATUS) & 0xff;

	writel(0, host->addr + BLOCK);
	writel(0, host->addr + DMA_CONTROL);

	if (host->cmd_flags & DMA_DATA) {
		pci_unmap_sg(host->chip->pdev, &host->req->sg, 1,
			     host->req->data_dir == READ
			     ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
	} else {
		/* PIO: disarm the FIFO-ready interrupt again. */
		t_val = readl(host->addr + INT_STATUS_ENABLE);
		if (host->req->data_dir == READ)
			t_val &= ~INT_STATUS_FIFO_RRDY;
		else
			t_val &= ~INT_STATUS_FIFO_WRDY;

		writel(t_val, host->addr + INT_STATUS_ENABLE);
		writel(t_val, host->addr + INT_SIGNAL_ENABLE);
	}

	writel((~HOST_CONTROL_LED) & readl(host->addr + HOST_CONTROL),
	       host->addr + HOST_CONTROL);

	if (!last) {
		/* Keep issuing until one sticks or the queue drains. */
		do {
			rc = memstick_next_req(msh, &host->req);
		} while (!rc && jmb38x_ms_issue_cmd(msh));
	} else {
		/* Teardown: fail every remaining request. */
		do {
			rc = memstick_next_req(msh, &host->req);
			if (!rc)
				host->req->error = -ETIME;
		} while (!rc);
	}
}
/*
 * Interrupt handler: decode INT_STATUS, propagate errors into the
 * current request, pump PIO data, note media changes, and complete the
 * request once both the TPC (CMD_READY) and its data phase (FIFO_READY)
 * have finished.
 */
static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id)
{
	struct memstick_host *msh = dev_id;
	struct jmb38x_ms_host *host = memstick_priv(msh);
	unsigned int irq_status;

	spin_lock(&host->lock);
	irq_status = readl(host->addr + INT_STATUS);
	dev_dbg(&host->chip->pdev->dev, "irq_status = %08x\n", irq_status);
	/* 0 / all-ones: not our interrupt (shared line or dead hardware). */
	if (irq_status == 0 || irq_status == (~0)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	if (host->req) {
		if (irq_status & INT_STATUS_ANY_ERR) {
			if (irq_status & INT_STATUS_CRC_ERR)
				host->req->error = -EILSEQ;
			else if (irq_status & INT_STATUS_TPC_ERR) {
				dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n");
				jmb38x_ms_complete_cmd(msh, 0);
			} else
				host->req->error = -ETIME;
		} else {
			if (host->cmd_flags & DMA_DATA) {
				if (irq_status & INT_STATUS_EOTRAN)
					host->cmd_flags |= FIFO_READY;
			} else {
				/* PIO: pump on FIFO-ready; a final pump at
				 * end-of-transfer drains/flushes leftovers. */
				if (irq_status & (INT_STATUS_FIFO_RRDY
						  | INT_STATUS_FIFO_WRDY))
					jmb38x_ms_transfer_data(host);

				if (irq_status & INT_STATUS_EOTRAN) {
					jmb38x_ms_transfer_data(host);
					host->cmd_flags |= FIFO_READY;
				}
			}

			if (irq_status & INT_STATUS_EOTPC) {
				host->cmd_flags |= CMD_READY;
				if (host->cmd_flags & REG_DATA) {
					if (host->req->data_dir == READ) {
						/* Harvest the reply bytes
						 * from TPC_P0/TPC_P1. */
						host->io_word[0]
							= readl(host->addr
								+ TPC_P0);
						host->io_word[1]
							= readl(host->addr
								+ TPC_P1);
						host->io_pos = 8;

						jmb38x_ms_transfer_data(host);
					}
					host->cmd_flags |= FIFO_READY;
				}
			}
		}
	}

	if (irq_status & (INT_STATUS_MEDIA_IN | INT_STATUS_MEDIA_OUT)) {
		dev_dbg(&host->chip->pdev->dev, "media changed\n");
		memstick_detect_change(msh);
	}

	/* Acknowledge everything we have seen. */
	writel(irq_status, host->addr + INT_STATUS);

	if (host->req
	    && (((host->cmd_flags & CMD_READY)
		 && (host->cmd_flags & FIFO_READY))
		|| host->req->error))
		jmb38x_ms_complete_cmd(msh, 0);

	spin_unlock(&host->lock);
	return IRQ_HANDLED;
}
static void jmb38x_ms_abort(unsigned long data)
{
struct memstick_host *msh = (struct memstick_host *)data;
struct jmb38x_ms_host *host = memstick_priv(msh);
unsigned long flags;
dev_dbg(&host->chip->pdev->dev, "abort\n");
spin_lock_irqsave(&host->lock, flags);
if (host->req) {
host->req->error = -ETIME;
jmb38x_ms_complete_cmd(msh, 0);
}
spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Bottom half that pulls requests from the memstick core and issues them,
 * as long as the hardware is idle and the core keeps supplying work.
 */
static void jmb38x_ms_req_tasklet(unsigned long data)
{
	struct memstick_host *stick = (struct memstick_host *)data;
	struct jmb38x_ms_host *hst = memstick_priv(stick);
	unsigned long irq_flags;
	int err;

	spin_lock_irqsave(&hst->lock, irq_flags);
	if (!hst->req) {
		for (;;) {
			err = memstick_next_req(stick, &hst->req);
			dev_dbg(&hst->chip->pdev->dev, "tasklet req %d\n", err);
			/* stop when the core has no more work, or once a
			 * command was actually started on the hardware */
			if (err || !jmb38x_ms_issue_cmd(stick))
				break;
		}
	}
	spin_unlock_irqrestore(&hst->lock, irq_flags);
}
/* No-op .request callback installed during removal so late submissions
 * from the core are silently dropped. */
static void jmb38x_ms_dummy_submit(struct memstick_host *msh)
{
}

/* Normal .request callback: defer the actual work to the tasklet. */
static void jmb38x_ms_submit_req(struct memstick_host *msh)
{
	struct jmb38x_ms_host *hst = memstick_priv(msh);

	tasklet_schedule(&hst->notify);
}
/*
 * Two-phase controller reset: first request a reset (RESET_REQ) and wait for
 * the controller to acknowledge, then perform the hard reset (RESET).  Each
 * phase is polled up to 20 times with a 20ns delay.  A phase-1 timeout is
 * only logged; a phase-2 timeout is fatal.  On success all interrupt sources
 * are re-armed.
 */
static int jmb38x_ms_reset(struct jmb38x_ms_host *host)
{
	int cnt;

	writel(HOST_CONTROL_RESET_REQ | HOST_CONTROL_CLOCK_EN
	       | readl(host->addr + HOST_CONTROL),
	       host->addr + HOST_CONTROL);
	mmiowb();

	for (cnt = 0; cnt < 20; ++cnt) {
		if (!(HOST_CONTROL_RESET_REQ
		      & readl(host->addr + HOST_CONTROL)))
			goto reset_next;
		ndelay(20);
	}
	dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n");
	/* fall through and try the hard reset anyway */

reset_next:
	writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
	       | readl(host->addr + HOST_CONTROL),
	       host->addr + HOST_CONTROL);
	mmiowb();

	for (cnt = 0; cnt < 20; ++cnt) {
		if (!(HOST_CONTROL_RESET
		      & readl(host->addr + HOST_CONTROL)))
			goto reset_ok;
		ndelay(20);
	}
	dev_dbg(&host->chip->pdev->dev, "reset timeout\n");
	return -EIO;

reset_ok:
	mmiowb();
	/* re-enable every interrupt source */
	writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE);
	writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
	return 0;
}
/*
 * memstick_host .set_param callback: switch socket power on/off and select
 * the bus interface mode (serial / 4-bit / 8-bit parallel) together with the
 * matching clock source and delay.
 *
 * Fix vs. original: removed the stray ';' after the closing brace of the
 * switch statement (an empty declaration, flagged by -Wpedantic).  Behavior
 * is unchanged; unknown parameters still return 0.
 */
static int jmb38x_ms_set_param(struct memstick_host *msh,
			       enum memstick_param param,
			       int value)
{
	struct jmb38x_ms_host *host = memstick_priv(msh);
	unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
	unsigned int clock_ctl = CLOCK_CONTROL_BY_MMIO, clock_delay = 0;
	int rc = 0;

	switch (param) {
	case MEMSTICK_POWER:
		if (value == MEMSTICK_POWER_ON) {
			rc = jmb38x_ms_reset(host);
			if (rc)
				return rc;

			/* base HOST_CONTROL value used after reset - the
			 * meaning of the low bits is not documented here;
			 * presumably a vendor-recommended setting */
			host_ctl = 7;
			host_ctl |= HOST_CONTROL_POWER_EN
				    | HOST_CONTROL_CLOCK_EN;
			writel(host_ctl, host->addr + HOST_CONTROL);

			/* per-socket pad pull-up/pull-down configuration */
			writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
			       : PAD_PU_PD_ON_MS_SOCK0,
			       host->addr + PAD_PU_PD);

			writel(PAD_OUTPUT_ENABLE_MS,
			       host->addr + PAD_OUTPUT_ENABLE);

			msleep(10);
			dev_dbg(&host->chip->pdev->dev, "power on\n");
		} else if (value == MEMSTICK_POWER_OFF) {
			host_ctl &= ~(HOST_CONTROL_POWER_EN
				      | HOST_CONTROL_CLOCK_EN);
			writel(host_ctl, host->addr + HOST_CONTROL);
			writel(0, host->addr + PAD_OUTPUT_ENABLE);
			writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD);
			dev_dbg(&host->chip->pdev->dev, "power off\n");
		} else
			return -EINVAL;
		break;
	case MEMSTICK_INTERFACE:
		dev_dbg(&host->chip->pdev->dev,
			"Set Host Interface Mode to %d\n", value);
		host_ctl &= ~(HOST_CONTROL_FAST_CLK | HOST_CONTROL_REI |
			      HOST_CONTROL_REO);
		host_ctl |= HOST_CONTROL_TDELAY_EN | HOST_CONTROL_HW_OC_P;
		host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);

		if (value == MEMSTICK_SERIAL) {
			host_ctl |= HOST_CONTROL_IF_SERIAL
				    << HOST_CONTROL_IF_SHIFT;
			host_ctl |= HOST_CONTROL_REI;
			clock_ctl |= CLOCK_CONTROL_40MHZ;
			clock_delay = 0;
		} else if (value == MEMSTICK_PAR4) {
			host_ctl |= HOST_CONTROL_FAST_CLK;
			host_ctl |= HOST_CONTROL_IF_PAR4
				    << HOST_CONTROL_IF_SHIFT;
			host_ctl |= HOST_CONTROL_REO;
			clock_ctl |= CLOCK_CONTROL_40MHZ;
			clock_delay = 4;
		} else if (value == MEMSTICK_PAR8) {
			host_ctl |= HOST_CONTROL_FAST_CLK;
			host_ctl |= HOST_CONTROL_IF_PAR8
				    << HOST_CONTROL_IF_SHIFT;
			clock_ctl |= CLOCK_CONTROL_50MHZ;
			clock_delay = 0;
		} else
			return -EINVAL;

		/* stop the clock before reprogramming its source */
		writel(host_ctl, host->addr + HOST_CONTROL);
		writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL);
		writel(clock_ctl, host->addr + CLOCK_CONTROL);
		pci_write_config_byte(host->chip->pdev,
				      PCI_CTL_CLOCK_DLY_ADDR + 1,
				      clock_delay);
		host->ifmode = value;
		break;
	}
	return 0;
}
/* Vendor-specific PCI config space registers controlling the card power
 * switches (PMOS) and the controller clock source. */
#define PCI_PMOS0_CONTROL 0xae
#define PMOS0_ENABLE 0x01
#define PMOS0_OVERCURRENT_LEVEL_2_4V 0x06
#define PMOS0_EN_OVERCURRENT_DEBOUNCE 0x40
#define PMOS0_SW_LED_POLARITY_ENABLE 0x80
#define PMOS0_ACTIVE_BITS (PMOS0_ENABLE | PMOS0_EN_OVERCURRENT_DEBOUNCE | \
			   PMOS0_OVERCURRENT_LEVEL_2_4V)
#define PCI_PMOS1_CONTROL 0xbd
#define PMOS1_ACTIVE_BITS 0x4a
#define PCI_CLOCK_CTL 0xb9
/*
 * Switch the card power MOSFETs on (flag != 0) or off via the vendor PCI
 * config registers, and move clock control from PCI config to MMIO.
 * PMOS1 is touched only when a second socket (BAR 1) is present.
 */
static int jmb38x_ms_pmos(struct pci_dev *pdev, int flag)
{
	unsigned char val;

	pci_read_config_byte(pdev, PCI_PMOS0_CONTROL, &val);
	if (flag)
		val |= PMOS0_ACTIVE_BITS;
	else
		val &= ~PMOS0_ACTIVE_BITS;
	pci_write_config_byte(pdev, PCI_PMOS0_CONTROL, val);
	dev_dbg(&pdev->dev, "JMB38x: set PMOS0 val 0x%x\n", val);

	if (pci_resource_flags(pdev, 1)) {
		pci_read_config_byte(pdev, PCI_PMOS1_CONTROL, &val);
		if (flag)
			val |= PMOS1_ACTIVE_BITS;
		else
			val &= ~PMOS1_ACTIVE_BITS;
		pci_write_config_byte(pdev, PCI_PMOS1_CONTROL, val);
		dev_dbg(&pdev->dev, "JMB38x: set PMOS1 val 0x%x\n", val);
	}

	pci_read_config_byte(pdev, PCI_CLOCK_CTL, &val);
	pci_write_config_byte(pdev, PCI_CLOCK_CTL, val & ~0x0f);
	/* NOTE(review): the second write uses the original 'val' (not the
	 * masked value), so only bit 0 ends up changed overall - presumably
	 * intentional per the vendor sequence; confirm against datasheet. */
	pci_write_config_byte(pdev, PCI_CLOCK_CTL, val | 0x01);
	dev_dbg(&pdev->dev, "Clock Control by PCI config is disabled!\n");

	return 0;
}
#ifdef CONFIG_PM

/*
 * PCI suspend: quiesce every registered memstick host, then save state and
 * power the device down to the requested sleep state.
 */
static int jmb38x_ms_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct jmb38x_ms *jm = pci_get_drvdata(dev);
	int idx;

	for (idx = 0; idx < jm->host_cnt && jm->hosts[idx]; ++idx)
		memstick_suspend_host(jm->hosts[idx]);

	pci_save_state(dev);
	pci_enable_wake(dev, pci_choose_state(dev, state), 0);
	pci_disable_device(dev);
	pci_set_power_state(dev, pci_choose_state(dev, state));
	return 0;
}
/*
 * PCI resume: re-power the device, restore its state, turn the PMOS card
 * power back on and wake every registered host, forcing a media rescan.
 */
static int jmb38x_ms_resume(struct pci_dev *dev)
{
	struct jmb38x_ms *jm = pci_get_drvdata(dev);
	int err, idx;

	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err)
		return err;
	pci_set_master(dev);

	jmb38x_ms_pmos(dev, 1);

	for (idx = 0; idx < jm->host_cnt && jm->hosts[idx]; ++idx) {
		memstick_resume_host(jm->hosts[idx]);
		memstick_detect_change(jm->hosts[idx]);
	}

	return 0;
}
#else

/* power management disabled: no suspend/resume callbacks */
#define jmb38x_ms_suspend NULL
#define jmb38x_ms_resume NULL

#endif /* CONFIG_PM */
/*
 * Count consecutive 256-byte MMIO BARs starting at BAR 0; each such BAR is
 * one MemoryStick socket.
 */
static int jmb38x_ms_count_slots(struct pci_dev *pdev)
{
	int idx, slots = 0;

	for (idx = 0; idx < PCI_ROM_RESOURCE; ++idx) {
		if (!(IORESOURCE_MEM & pci_resource_flags(pdev, idx))
		    || pci_resource_len(pdev, idx) != 256)
			break;
		slots++;
	}
	return slots;
}
static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
{
struct memstick_host *msh;
struct jmb38x_ms_host *host;
msh = memstick_alloc_host(sizeof(struct jmb38x_ms_host),
&jm->pdev->dev);
if (!msh)
return NULL;
host = memstick_priv(msh);
host->chip = jm;
host->addr = ioremap(pci_resource_start(jm->pdev, cnt),
pci_resource_len(jm->pdev, cnt));
if (!host->addr)
goto err_out_free;
spin_lock_init(&host->lock);
host->id = cnt;
snprintf(host->host_id, sizeof(host->host_id), DRIVER_NAME ":slot%d",
host->id);
host->irq = jm->pdev->irq;
host->timeout_jiffies = msecs_to_jiffies(1000);
tasklet_init(&host->notify, jmb38x_ms_req_tasklet, (unsigned long)msh);
msh->request = jmb38x_ms_submit_req;
msh->set_param = jmb38x_ms_set_param;
msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8;
setup_timer(&host->timer, jmb38x_ms_abort, (unsigned long)msh);
if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id,
msh))
return msh;
iounmap(host->addr);
err_out_free:
kfree(msh);
return NULL;
}
/*
 * Release one socket host: free its IRQ, unmap its registers and drop the
 * memstick core's reference.
 */
static void jmb38x_ms_free_host(struct memstick_host *msh)
{
	struct jmb38x_ms_host *host = memstick_priv(msh);

	free_irq(host->irq, msh);
	iounmap(host->addr);
	memstick_free_host(msh);
}
/*
 * PCI probe: enable the device, claim its regions, power the sockets, then
 * allocate and register one memstick host per detected slot.  Succeeds if at
 * least one host was registered.
 *
 * Fix vs. original: when no slots are found, the code jumped to err_out with
 * pci_dev_busy = 1, which skipped both pci_release_regions() and
 * pci_disable_device() even though the regions had just been successfully
 * requested - leaking them.  It now unwinds through err_out_int.
 */
static int jmb38x_ms_probe(struct pci_dev *pdev,
			   const struct pci_device_id *dev_id)
{
	struct jmb38x_ms *jm;
	int pci_dev_busy = 0;	/* set when regions were never ours to release */
	int rc, cnt;

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRIVER_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	jmb38x_ms_pmos(pdev, 1);

	cnt = jmb38x_ms_count_slots(pdev);
	if (!cnt) {
		rc = -ENODEV;
		goto err_out_int;
	}

	jm = kzalloc(sizeof(struct jmb38x_ms)
		     + cnt * sizeof(struct memstick_host *), GFP_KERNEL);
	if (!jm) {
		rc = -ENOMEM;
		goto err_out_int;
	}

	jm->pdev = pdev;
	jm->host_cnt = cnt;
	pci_set_drvdata(pdev, jm);

	for (cnt = 0; cnt < jm->host_cnt; ++cnt) {
		jm->hosts[cnt] = jmb38x_ms_alloc_host(jm, cnt);
		if (!jm->hosts[cnt])
			break;

		rc = memstick_add_host(jm->hosts[cnt]);

		if (rc) {
			jmb38x_ms_free_host(jm->hosts[cnt]);
			jm->hosts[cnt] = NULL;
			break;
		}
	}

	/* partial success is fine: keep whatever hosts registered */
	if (cnt)
		return 0;

	rc = -ENODEV;

	pci_set_drvdata(pdev, NULL);
	kfree(jm);
err_out_int:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
	return rc;
}
/*
 * PCI remove: for each host, first redirect new submissions to the dummy
 * handler and kill the tasklet, then mask the hardware interrupts, fail any
 * in-flight request, and only then unregister and free the host.  Finally
 * power the sockets off and release the PCI resources.
 */
static void jmb38x_ms_remove(struct pci_dev *dev)
{
	struct jmb38x_ms *jm = pci_get_drvdata(dev);
	struct jmb38x_ms_host *host;
	int cnt;
	unsigned long flags;

	for (cnt = 0; cnt < jm->host_cnt; ++cnt) {
		if (!jm->hosts[cnt])
			break;

		host = memstick_priv(jm->hosts[cnt]);

		/* stop accepting work before tearing anything down */
		jm->hosts[cnt]->request = jmb38x_ms_dummy_submit;
		tasklet_kill(&host->notify);
		writel(0, host->addr + INT_SIGNAL_ENABLE);
		writel(0, host->addr + INT_STATUS_ENABLE);
		mmiowb();
		dev_dbg(&jm->pdev->dev, "interrupts off\n");

		/* fail whatever request is still outstanding */
		spin_lock_irqsave(&host->lock, flags);
		if (host->req) {
			host->req->error = -ETIME;
			jmb38x_ms_complete_cmd(jm->hosts[cnt], 1);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		memstick_remove_host(jm->hosts[cnt]);
		dev_dbg(&jm->pdev->dev, "host removed\n");

		jmb38x_ms_free_host(jm->hosts[cnt]);
	}

	jmb38x_ms_pmos(dev, 0);

	pci_set_drvdata(dev, NULL);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(jm);
}
/* PCI IDs of the JMicron MemoryStick controllers handled by this driver */
static struct pci_device_id jmb38x_ms_id_tbl [] = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB385_MS) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB390_MS) },
	{ }
};

static struct pci_driver jmb38x_ms_driver = {
	.name = DRIVER_NAME,
	.id_table = jmb38x_ms_id_tbl,
	.probe = jmb38x_ms_probe,
	.remove = jmb38x_ms_remove,
	.suspend = jmb38x_ms_suspend,
	.resume = jmb38x_ms_resume
};

module_pci_driver(jmb38x_ms_driver);

MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("JMicron jmb38x MemoryStick driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, jmb38x_ms_id_tbl);
| gpl-2.0 |
zhuoyang/android_kernel_sony_msm8974 | drivers/clk/clk.c | 2718 | 37098 | /*
* Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Standard functionality for the common clock API. See Documentation/clk.txt
*/
#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
/* IRQ-safe lock serializing clk_enable()/clk_disable() across all clocks */
static DEFINE_SPINLOCK(enable_lock);
/* sleepable lock serializing prepare/unprepare, rate changes and topology */
static DEFINE_MUTEX(prepare_lock);

/* clocks whose parent is known (or which are roots) */
static HLIST_HEAD(clk_root_list);
/* clocks whose parent has not been registered yet */
static HLIST_HEAD(clk_orphan_list);
/* registered rate-change notifiers, protected by prepare_lock */
static LIST_HEAD(clk_notifier_list);

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
/* set once clk_debug_init() has built the initial debugfs tree */
static int inited = 0;
/*
 * Create the debugfs directory for a single clk under @pdentry, populated
 * with read-only views of its rate, flags and the three counters.  On any
 * debugfs failure the whole directory is removed again.
 *
 * caller must hold prepare_lock
 */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			       (u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			       (u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			       (u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove(clk->dentry);
out:
	return ret;
}
/*
 * Recursively create debugfs entries for @clk and all of its descendants
 * under @pdentry.  Failures while descending into children are ignored.
 *
 * Fix vs. original: removed the doubled semicolon in the 'ret' initializer.
 *
 * caller must hold prepare_lock
 */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	struct hlist_node *tmp;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);
	if (ret)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;

out:
	return ret;
}
/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far) so this is taken care.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		/* parentless: either a true root or an orphan */
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		/* bail if the parent has no dentry yet */
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}
/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus insuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	mutex_lock(&prepare_lock);

	/* roots under "clk/", orphans under "clk/orphans/" */
	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	/* from now on clk_debug_register() adds nodes directly */
	inited = 1;

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
/* debugfs disabled: registering a clk in the debug tree is a no-op */
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif /* CONFIG_COMMON_CLK_DEBUG */
#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
/*
 * Depth-first walk that gates every clock which is still enabled in hardware
 * but has no software users, unless it is marked CLK_IGNORE_UNUSED.
 * Children are handled before their parent so the parent is not gated while
 * a child still runs.
 *
 * caller must hold prepare_lock
 */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	struct hlist_node *tmp;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	spin_lock_irqsave(&enable_lock, flags);

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	if (__clk_is_enabled(clk) && clk->ops->disable)
		clk->ops->disable(clk->hw);

unlock_out:
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return;
}
/*
 * Late-boot pass that gates all clocks left running by the bootloader or
 * early init but never claimed by a driver; covers both the root and the
 * orphan trees.
 */
static int clk_disable_unused(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_disable_unused);
#else
/*
 * Config-off stub.  Fix vs. original: the stub took a 'struct clk *' while
 * the real clk_disable_unused() takes void, so any (future) caller could
 * only compile against one of the two variants.  Align the signatures.
 */
static inline int clk_disable_unused(void) { return 0; }
#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
/***        helper functions   ***/

/* NULL-tolerant read-only accessors for struct clk fields. */

inline const char *__clk_get_name(struct clk *clk)
{
	if (!clk)
		return NULL;
	return clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
	if (!clk)
		return NULL;
	return clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
	/* NOTE(review): -EINVAL is truncated to u8 here (same as the original
	 * ternary form); callers cannot tell it apart from a real count. */
	if (!clk)
		return -EINVAL;
	return clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
	if (!clk)
		return NULL;
	return clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
	if (!clk)
		return -EINVAL;
	return clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
	if (!clk)
		return -EINVAL;
	return clk->prepare_count;
}
/*
 * Return the cached rate of @clk.  NULL clocks and orphans (non-root clocks
 * without a parent) yield error codes encoded as unsigned long, exactly as
 * the original goto-based version did.
 */
unsigned long __clk_get_rate(struct clk *clk)
{
	if (!clk)
		return (unsigned long)-EINVAL;

	if (!(clk->flags & CLK_IS_ROOT) && !clk->parent)
		return (unsigned long)-ENODEV;

	return clk->rate;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
	if (!clk)
		return (unsigned long)-EINVAL;
	return clk->flags;
}
/*
 * Query whether @clk is enabled.  Asks the hardware via .is_enabled when the
 * op exists; otherwise falls back to the software enable counter.
 */
int __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return -EINVAL;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if it is missing
	 */
	if (!clk->ops->is_enabled)
		return clk->enable_count ? 1 : 0;

	return clk->ops->is_enabled(clk->hw);
}
/* Depth-first search for a clock called @name in the subtree rooted at
 * @clk; returns NULL when not found. */
static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *candidate;
	struct clk *found;
	struct hlist_node *pos;

	if (strcmp(clk->name, name) == 0)
		return clk;

	hlist_for_each_entry(candidate, pos, &clk->children, child_node) {
		found = __clk_lookup_subtree(name, candidate);
		if (found)
			return found;
	}

	return NULL;
}
/* Find a registered clock by name, checking the proper tree first and the
 * orphan tree second.  Returns NULL for a NULL name or when not found. */
struct clk *__clk_lookup(const char *name)
{
	struct clk *root;
	struct clk *found;
	struct hlist_node *pos;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root, pos, &clk_root_list, child_node) {
		found = __clk_lookup_subtree(name, root);
		if (found)
			return found;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root, pos, &clk_orphan_list, child_node) {
		found = __clk_lookup_subtree(name, root);
		if (found)
			return found;
	}

	return NULL;
}
/***        clk api        ***/

/*
 * Drop one prepare reference from @clk; when the count hits zero, call the
 * .unprepare op and recurse into the parent.  Must be called under
 * prepare_lock, with clk_disable already done (enable_count must be 0).
 */
void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	mutex_lock(&prepare_lock);
	__clk_unprepare(clk);
	mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);
/*
 * Take one prepare reference on @clk.  On the 0 -> 1 transition the parent
 * is prepared first, then the .prepare op is called; a failing op unwinds
 * the parent again.  Must be called under prepare_lock.
 */
int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				/* undo the parent prepare taken above */
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	mutex_lock(&prepare_lock);
	ret = __clk_prepare(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
/*
 * Drop one enable reference from @clk; when the count hits zero, call the
 * .disable op and recurse into the parent.  Must be called under
 * enable_lock.
 */
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
/*
 * Take one enable reference on @clk.  On the 0 -> 1 transition the parent
 * is enabled first, then the .enable op is called; a failing op disables
 * the parent again.  The clock must already be prepared.  Must be called
 * under enable_lock.
 */
static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				/* undo the parent enable taken above */
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk.  Does not query the hardware.  If
 * clk is NULL then returns -EINVAL (encoded as unsigned long).
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	mutex_lock(&prepare_lock);
	rate = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long scratch;
	unsigned long *parent_rate = NULL;

	if (!clk)
		return -EINVAL;

	/* without a .round_rate op the current rate is all we can offer */
	if (!clk->ops->round_rate)
		return clk->rate;

	/* hand the op a writable parent rate only when parent propagation
	 * is permitted for this clock */
	if (clk->flags & CLK_SET_RATE_PARENT)
		parent_rate = &scratch;

	return clk->ops->round_rate(clk->hw, rate, parent_rate);
}
/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
/**
* __clk_notify - call clk notifier chain
* @clk: struct clk * that is changing rate
* @msg: clk notifier type (see include/linux/clk.h)
* @old_rate: old clk rate
* @new_rate: new clk rate
*
* Triggers a notifier call chain on the clk rate-change notification
* for 'clk'. Passes a pointer to the struct clk and the previous
* and current rates to the notifier callback. Intended to be called by
* internal clock code only. Returns NOTIFY_DONE from the last driver
* called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
* a driver returns that.
*/
static int __clk_notify(struct clk *clk, unsigned long msg,
unsigned long old_rate, unsigned long new_rate)
{
struct clk_notifier *cn;
struct clk_notifier_data cnd;
int ret = NOTIFY_DONE;
cnd.clk = clk;
cnd.old_rate = old_rate;
cnd.new_rate = new_rate;
list_for_each_entry(cn, &clk_notifier_list, node) {
if (cn->clk == clk) {
ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
&cnd);
break;
}
}
return ret;
}
/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct hlist_node *tmp;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	/* no .recalc_rate: inherit the parent's rate */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}
/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct hlist_node *tmp;
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort the rate change if a driver returns NOTIFY_BAD */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret == NOTIFY_BAD)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret == NOTIFY_BAD)
			break;
	}

out:
	return ret;
}
/*
 * Record the prospective rate of @clk in clk->new_rate and recursively
 * compute what each descendant's rate would become (via .recalc_rate, or
 * inheritance when the op is absent).  No hardware is touched.
 *
 * Caller must hold prepare_lock.
 */
static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;
	struct hlist_node *tmp;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}
/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 *
 * NOTE(review): clk->parent is dereferenced unconditionally below, so this
 * assumes clk_set_rate() is never called here for a parentless (root/orphan)
 * clock that lacks a .round_rate op returning early - confirm with callers.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = clk->parent->rate;
	unsigned long new_rate;

	/* cannot round and must not touch the parent: nothing can change */
	if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
		clk->new_rate = clk->rate;
		return NULL;
	}

	/* cannot round locally: delegate the whole request to the parent */
	if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->new_rate = clk->parent->new_rate;

		goto out;
	}

	if (clk->flags & CLK_SET_RATE_PARENT)
		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
	else
		new_rate = clk->ops->round_rate(clk->hw, rate, NULL);

	/* .round_rate asked for a different parent rate: propagate upward */
	if (best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}
/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.  Returns the deepest clock whose notifier vetoed the
 * change, or NULL when nobody objected.
 *
 * Fixes vs. original: returned the integer literal 0 from a pointer-valued
 * function (now NULL), and the 'clk' argument was clobbered inside the
 * child loop (now a separate local is used).
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct hlist_node *tmp;
	struct clk *child, *sub_fail, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret == NOTIFY_BAD)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		sub_fail = clk_propagate_rate_change(child, event);
		if (sub_fail)
			fail_clk = sub_fail;
	}

	return fail_clk;
}
/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 *
 * NOTE(review): clk->parent->rate is read unconditionally, so the topmost
 * clock passed in is assumed to have a parent - confirm against
 * clk_calc_new_rates(), which makes the same assumption.
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	struct hlist_node *tmp;

	old_rate = clk->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate);

	/* re-read the achieved rate from hardware when possible */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				clk->parent->rate);
	else
		clk->rate = clk->parent->rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_change_rate(child);
}
/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only change the rate of clk.
 *
 * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
 * will fail; only when the clk is disabled will it be able to change
 * its rate.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
 * recursively propagate up to clk's parent; whether or not this happens
 * depends on the outcome of clk's .round_rate implementation.  If
 * *parent_rate is 0 after calling .round_rate then upstream parent
 * propagation is ignored.  If *parent_rate comes back with a new rate
 * for clk's parent then we propagate up to clk's parent and set its
 * rate.  Upward propagation will continue until either a clk does not
 * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
 * changes to clk's parent_rate.  If there is a failure during upstream
 * propagation then clk_set_rate will unwind and restore each clk's rate
 * that had been successfully changed.  Afterwards a rate change abort
 * notification will be propagated downstream, starting from the clk
 * that failed.
 *
 * At the end of all of the rate setting, clk_set_rate internally calls
 * __clk_recalc_rates and propagates the rate changes downstream,
 * starting from the highest clk whose rate was changed.  This has the
 * added benefit of propagating post-rate change notifiers.
 *
 * Note that while post-rate change and rate change abort notifications
 * are guaranteed to be sent to a clk only once per call to
 * clk_set_rate, pre-change notifications will be sent for every clk
 * whose rate is changed.  Stacking pre-change notifications is noisy
 * for the drivers subscribed to them, but this allows drivers to react
 * to intermediate clk rate changes up until the point where the final
 * rate is achieved at the end of upstream propagation.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		/* a notifier vetoed the change: send the abort downstream */
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

	mutex_unlock(&prepare_lock);

	return 0;
out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
/**
* clk_get_parent - return the parent of a clk
* @clk: the clk whose parent gets returned
*
* Simply returns clk->parent. Returns NULL if clk is NULL.
*/
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *ret;

	/* take prepare_lock so the topology cannot shift underneath us */
	mutex_lock(&prepare_lock);
	ret = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
/*
* .get_parent is mandatory for clocks with multiple possible parents. It is
* optional for single-parent clocks. Always call .get_parent if it is
* available and WARN if it is missing for multi-parent clocks.
*
* For single-parent clocks without .get_parent, first check to see if the
* .parents array exists, and if so use it to avoid an expensive tree
* traversal. If .parents does not exist then walk the tree with __clk_lookup.
*/
/*
 * Determine clk's initial parent.
 *
 * Trivial cases: no parents -> NULL; exactly one parent -> look it up by
 * name (lazily, if not already resolved).  Multi-parent clocks must
 * implement .get_parent so the hardware can tell us which input is
 * currently selected.  Returns the parent clk, or NULL if it cannot be
 * resolved yet (lazy lookup happens later).  Caller holds prepare_lock.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	/*
	 * Must be kzalloc, not kmalloc: entries are filled lazily and have
	 * to read as NULL until they have been looked up, otherwise the
	 * !clk->parents[index] test below inspects uninitialized memory.
	 */
	if (!clk->parents)
		clk->parents =
			kzalloc((sizeof(struct clk *) * clk->num_parents),
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}
/*
 * Move clk under new_parent in the clk topology, keep the debugfs tree in
 * sync, and propagate the resulting rate change downstream.  Caller must
 * hold prepare_lock.
 */
void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
	struct dentry *d;
	struct dentry *new_parent_d;
#endif

	if (!clk || !new_parent)
		return;

	/* unhook from the old parent's children list (or the orphan list) */
	hlist_del(&clk->child_node);

	/*
	 * NOTE(review): the early return above guarantees new_parent is
	 * non-NULL here, so the orphan-list branch below appears
	 * unreachable -- confirm whether the NULL-new_parent guard was
	 * really intended before relying on orphan re-listing here.
	 */
	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
	/* mirror the move in debugfs once the debug tree exists */
	if (!inited)
		goto out;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
out:
#endif

	clk->parent = new_parent;

	/* recompute rates below the new attachment point and notify */
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}
static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
struct clk *old_parent;
unsigned long flags;
int ret = -EINVAL;
u8 i;
old_parent = clk->parent;
/* find index of new parent clock using cached parent ptrs */
for (i = 0; i < clk->num_parents; i++)
if (clk->parents[i] == parent)
break;
/*
* find index of new parent clock using string name comparison
* also try to cache the parent to avoid future calls to __clk_lookup
*/
if (i == clk->num_parents)
for (i = 0; i < clk->num_parents; i++)
if (!strcmp(clk->parent_names[i], parent->name)) {
clk->parents[i] = __clk_lookup(parent->name);
break;
}
if (i == clk->num_parents) {
pr_debug("%s: clock %s is not a possible parent of clock %s\n",
__func__, parent->name, clk->name);
goto out;
}
/* migrate prepare and enable */
if (clk->prepare_count)
__clk_prepare(parent);
/* FIXME replace with clk_is_enabled(clk) someday */
spin_lock_irqsave(&enable_lock, flags);
if (clk->enable_count)
__clk_enable(parent);
spin_unlock_irqrestore(&enable_lock, flags);
/* change clock input source */
ret = clk->ops->set_parent(clk->hw, i);
/* clean up old prepare and enable */
spin_lock_irqsave(&enable_lock, flags);
if (clk->enable_count)
__clk_disable(old_parent);
spin_unlock_irqrestore(&enable_lock, flags);
if (clk->prepare_count)
__clk_unprepare(old_parent);
out:
return ret;
}
/**
* clk_set_parent - switch the parent of a mux clk
* @clk: the mux clk whose input we are switching
* @parent: the new input to clk
*
* Re-parent clk to use parent as it's new input source. If clk has the
* CLK_SET_PARENT_GATE flag set then clk must be gated for this
* operation to succeed. After successfully changing clk's parent
* clk_set_parent will update the clk topology, sysfs topology and
* propagate rate recalculation via __clk_recalc_rates. Returns 0 on
* success, -EERROR otherwise.
*/
/*
 * NOTE(review): when a pre-change notifier vetoes the switch, the raw
 * NOTIFY_STOP value is returned to the caller instead of a negative
 * errno -- looks wrong; confirm against callers before changing.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	/* reject a NULL clk or one with no ops table */
	if (!clk || !clk->ops)
		return -EINVAL;

	/* the hardware must support re-muxing at all */
	if (!clk->ops->set_parent)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* already the requested parent: nothing to do */
	if (clk->parent == parent)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, parent->rate);

	/* abort if a driver objects */
	if (ret == NOTIFY_STOP)
		goto out;

	/* only re-parent if the clock is not in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
		ret = -EBUSY;
	else
		ret = __clk_set_parent(clk, parent);

	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
		goto out;
	}

	/* propagate rate recalculation downstream */
	__clk_reparent(clk, parent);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
/**
* __clk_init - initialize the data structures in a struct clk
* @dev: device initializing this clk, placeholder for now
* @clk: clk being initialized
*
* Initializes the lists in struct clk, queries the hardware for the
* parent and rate and sets them both.
*
* Any struct clk passed into __clk_init must have the following members
* populated:
* .name
* .ops
* .hw
* .parent_names
* .num_parents
* .flags
*
* Essentially, everything that would normally be passed into clk_register is
* assumed to be initialized already in __clk_init. The other members may be
* populated, but are optional.
*
* __clk_init is only exposed via clk-private.h and is intended for use with
* very large numbers of clocks that need to be statically initialized. It is
* a layering violation to include clk-private.h from any code which implements
* a clock's .ops; as such any statically initialized clock data MUST be in a
* separate C file from the logic that implements it's operations.
*/
void __clk_init(struct device *dev, struct clk *clk)
{
	int i;
	struct clk *orphan;
	struct hlist_node *tmp, *tmp2;	/* cursors for the _safe orphan walk */

	if (!clk)
		return;

	/* all topology and list mutation happens under prepare_lock */
	mutex_lock(&prepare_lock);

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name))
		goto out;

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents && !clk->parents) {
		/* plain kmalloc is OK here: every slot is filled just below */
		clk->parents = kmalloc((sizeof(struct clk*) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as it's use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	mutex_unlock(&prepare_lock);

	return;
}
/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
* @name: clock name
* @ops: operations this clock supports
* @hw: link to hardware-specific clock data
* @parent_names: array of string names for all possible parents
* @num_parents: number of possible parents
* @flags: framework-level hints and quirks
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
* cannot be dereferenced by driver code but may be used in conjuction with the
* rest of the clock API.
*/
struct clk *clk_register(struct device *dev, const char *name,
		const struct clk_ops *ops, struct clk_hw *hw,
		char **parent_names, u8 num_parents, unsigned long flags)
{
	struct clk *clk = kzalloc(sizeof(*clk), GFP_KERNEL);

	if (clk == NULL)
		return NULL;

	/* stash the caller-supplied description */
	clk->name = name;
	clk->ops = ops;
	clk->hw = hw;
	clk->flags = flags;
	clk->parent_names = parent_names;
	clk->num_parents = num_parents;

	/* link hw back to its wrapper, then hook the clk into the tree */
	hw->clk = clk;
	__clk_init(dev, clk);

	return clk;
}
EXPORT_SYMBOL_GPL(clk_register);
/*** clk rate change notifiers ***/
/**
* clk_notifier_register - add a clk rate change notifier
* @clk: struct clk * to watch
* @nb: struct notifier_block * with callback info
*
* Request notification when clk's rate changes. This uses an SRCU
* notifier because we want it to block and notifier unregistrations are
* uncommon. The callbacks associated with the notifier must not
* re-enter into the clk framework by calling any top-level clk APIs;
* this will cause a nested prepare_lock mutex.
*
* Pre-change notifier callbacks will be passed the current, pre-change
* rate of the clk via struct clk_notifier_data.old_rate. The new,
* post-change rate of the clk is passed via struct
* clk_notifier_data.new_rate.
*
* Post-change notifiers will pass the now-current, post-change rate of
* the clk in both struct clk_notifier_data.old_rate and struct
* clk_notifier_data.new_rate.
*
* Abort-change notifiers are effectively the opposite of pre-change
* notifiers: the original pre-change clk rate is passed in via struct
* clk_notifier_data.new_rate and the failed post-change rate is passed
* in via struct clk_notifier_data.old_rate.
*
* clk_notifier_register() must be called from non-atomic context.
* Returns -EINVAL if called with null arguments, -ENOMEM upon
* allocation failure; otherwise, passes along the return value of
* srcu_notifier_chain_register().
*/
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/*
	 * if clk wasn't in the notifier list, allocate new clk_notifier
	 * (note: when no entry matched, cn is the list_for_each_entry
	 * head-container cursor, whose ->clk merely compares != clk)
	 */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	/* per-clk count lets the hot paths skip notification entirely */
	clk->notifier_count++;

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
/**
* clk_notifier_unregister - remove a clk rate change notifier
* @clk: struct clk *
* @nb: struct notifier_block * with callback info
*
* Request no further notification for changes to 'clk' and frees memory
* allocated in clk_notifier_register.
*
* Returns -EINVAL if called with null arguments; otherwise, passes
* along the return value of srcu_notifier_chain_unregister().
*/
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* find this clk's notifier entry, if any */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			/* last subscriber gone: tear down the whole entry */
			srcu_cleanup_notifier_head(&cn->notifier_head);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
| gpl-2.0 |
teemodk/android_kernel_htc_endeavoru | sound/soc/pxa/zylonite.c | 2718 | 7019 | /*
* zylonite.c -- SoC audio for Zylonite
*
* Copyright 2008 Wolfson Microelectronics PLC.
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "../codecs/wm9713.h"
#include "pxa2xx-ac97.h"
#include "pxa-ssp.h"
/*
 * There is a physical switch SW15 on the board which changes the MCLK
 * for the WM9713 between the standard AC97 master clock and the
 * output of the CLK_POUT signal from the PXA.
 */
static int clk_pout;		/* nonzero: board strapped to use CLK_POUT */
module_param(clk_pout, int, 0);
MODULE_PARM_DESC(clk_pout, "Use CLK_POUT as WM9713 MCLK (SW15 on board).");

static struct clk *pout;	/* CLK_POUT handle; valid only when clk_pout */

static struct snd_soc_card zylonite;	/* forward declaration, defined below */
/* Board-level DAPM widgets: the transducers wired to the WM9713 pins */
static const struct snd_soc_dapm_widget zylonite_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headphone", NULL),
	SND_SOC_DAPM_MIC("Headset Microphone", NULL),
	SND_SOC_DAPM_MIC("Handset Microphone", NULL),
	SND_SOC_DAPM_SPK("Multiactor", NULL),
	SND_SOC_DAPM_SPK("Headset Earpiece", NULL),
};
/* Currently supported audio map (sink, control, source) */
static const struct snd_soc_dapm_route audio_map[] = {

	/* Headphone output connected to HPL/HPR */
	{ "Headphone", NULL, "HPL" },
	{ "Headphone", NULL, "HPR" },

	/* On-board earpiece */
	{ "Headset Earpiece", NULL, "OUT3" },

	/* Headphone mic */
	{ "MIC2A", NULL, "Mic Bias" },
	{ "Mic Bias", NULL, "Headset Microphone" },

	/* On-board mic */
	{ "MIC1", NULL, "Mic Bias" },
	{ "Mic Bias", NULL, "Handset Microphone" },

	/* Multiactor differentially connected over SPKL/SPKR */
	{ "Multiactor", NULL, "SPKL" },
	{ "Multiactor", NULL, "SPKR" },
};
/*
 * One-time codec setup for the card: program the WM9713 PLL when the
 * board is strapped to CLK_POUT, register the board widgets/routes and
 * statically enable the always-on pins.
 */
static int zylonite_wm9713_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dapm_context *dapm = &rtd->codec->dapm;

	/* MCLK comes from CLK_POUT (SW15): tell the codec PLL its input */
	if (clk_pout)
		snd_soc_dai_set_pll(rtd->codec_dai, 0, 0,
				    clk_get_rate(pout), 0);

	snd_soc_dapm_new_controls(dapm, zylonite_dapm_widgets,
				  ARRAY_SIZE(zylonite_dapm_widgets));

	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));

	/* Static setup for now */
	snd_soc_dapm_enable_pin(dapm, "Headphone");
	snd_soc_dapm_enable_pin(dapm, "Headset Earpiece");

	snd_soc_dapm_sync(dapm);

	return 0;
}
/*
 * Configure clocking and formats for the voice (PCM) link.  Both ends run
 * in I2S, normal clock/frame, with the CPU SSP as clock/frame master.
 */
static int zylonite_voice_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	unsigned int pll_out;
	unsigned int div;
	int err;
	int rate = params_rate(params);
	int width = snd_pcm_format_physical_width(params_format(params));

	/* Only support ratios that we can generate neatly from the AC97
	 * based master clock - in particular, this excludes 44.1kHz.
	 * In most applications the voice DAC will be used for telephony
	 * data so multiples of 8kHz will be the common case.
	 */
	if (rate == 8000)
		div = 12;
	else if (rate == 16000)
		div = 6;
	else if (rate == 48000)
		div = 2;
	else
		return -EINVAL;		/* Don't support OSS emulation */

	/* Add 1 to the width for the leading clock cycle */
	pll_out = rate * (width + 1) * 8;

	err = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_AUDIO, 0, 1);
	if (err < 0)
		return err;

	err = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, pll_out);
	if (err < 0)
		return err;

	/* CLK_POUT feeds the PLL path; otherwise divide MCLK directly */
	if (clk_pout)
		err = snd_soc_dai_set_clkdiv(codec_dai, WM9713_PCMCLK_PLL_DIV,
					     WM9713_PCMDIV(div));
	else
		err = snd_soc_dai_set_clkdiv(codec_dai, WM9713_PCMCLK_DIV,
					     WM9713_PCMDIV(div));
	if (err < 0)
		return err;

	err = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
	if (err < 0)
		return err;

	err = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
	if (err < 0)
		return err;

	return 0;
}
/* PCM ops for the voice link; only hw_params needs board-specific help */
static struct snd_soc_ops zylonite_voice_ops = {
	.hw_params = zylonite_voice_hw_params,
};
/* DAI links: HiFi and aux over AC97, plus a PCM voice link over SSP3 */
static struct snd_soc_dai_link zylonite_dai[] = {
{
	.name = "AC97",
	.stream_name = "AC97 HiFi",
	.codec_name = "wm9713-codec",
	.platform_name = "pxa-pcm-audio",
	.cpu_dai_name = "pxa2xx-ac97",
	.codec_dai_name = "wm9713-hifi",
	.init = zylonite_wm9713_init,	/* runs once at card setup */
},
{
	.name = "AC97 Aux",
	.stream_name = "AC97 Aux",
	.codec_name = "wm9713-codec",
	.platform_name = "pxa-pcm-audio",
	.cpu_dai_name = "pxa2xx-ac97-aux",
	.codec_dai_name = "wm9713-aux",
},
{
	.name = "WM9713 Voice",
	.stream_name = "WM9713 Voice",
	.codec_name = "wm9713-codec",
	.platform_name = "pxa-pcm-audio",
	.cpu_dai_name = "pxa-ssp-dai.2",
	.codec_dai_name = "wm9713-voice",
	.ops = &zylonite_voice_ops,
},
};
/*
 * Card probe: when SW15 routes CLK_POUT to the codec MCLK, claim and
 * enable that clock before the card comes up.
 */
static int zylonite_probe(struct snd_soc_card *card)
{
	int err;

	if (!clk_pout)
		return 0;

	pout = clk_get(NULL, "CLK_POUT");
	if (IS_ERR(pout)) {
		dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n",
			PTR_ERR(pout));
		return PTR_ERR(pout);
	}

	err = clk_enable(pout);
	if (err != 0) {
		dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
			err);
		clk_put(pout);
		return err;
	}

	dev_dbg(card->dev, "MCLK enabled at %luHz\n",
		clk_get_rate(pout));
	return 0;
}
/* Card remove: release the CLK_POUT reference taken in probe */
static int zylonite_remove(struct snd_soc_card *card)
{
	if (!clk_pout)
		return 0;

	clk_disable(pout);
	clk_put(pout);

	return 0;
}
/* Gate the external MCLK once all streams are suspended */
static int zylonite_suspend_post(struct snd_soc_card *card)
{
	if (!clk_pout)
		return 0;

	clk_disable(pout);
	return 0;
}
/* Re-enable the external MCLK before streams are resumed */
static int zylonite_resume_pre(struct snd_soc_card *card)
{
	int err;

	if (!clk_pout)
		return 0;

	err = clk_enable(pout);
	if (err != 0)
		dev_err(card->dev, "Unable to enable CLK_POUT: %d\n",
			err);

	return err;
}
/* The board's sound card: ties the DAI links to lifecycle callbacks */
static struct snd_soc_card zylonite = {
	.name = "Zylonite",
	.probe = &zylonite_probe,
	.remove = &zylonite_remove,
	.suspend_post = &zylonite_suspend_post,	/* MCLK off after suspend */
	.resume_pre = &zylonite_resume_pre,	/* MCLK on before resume */
	.dai_link = zylonite_dai,
	.num_links = ARRAY_SIZE(zylonite_dai),
	.owner = THIS_MODULE,
};
static struct platform_device *zylonite_snd_ac97_device;

/* Module init: create the "soc-audio" device carrying the card */
static int __init zylonite_init(void)
{
	int err;

	zylonite_snd_ac97_device = platform_device_alloc("soc-audio", -1);
	if (zylonite_snd_ac97_device == NULL)
		return -ENOMEM;

	platform_set_drvdata(zylonite_snd_ac97_device, &zylonite);

	err = platform_device_add(zylonite_snd_ac97_device);
	if (err)
		platform_device_put(zylonite_snd_ac97_device);

	return err;
}
/* Module exit: undo zylonite_init(), dropping the soc-audio device */
static void __exit zylonite_exit(void)
{
	platform_device_unregister(zylonite_snd_ac97_device);
}
module_init(zylonite_init);
module_exit(zylonite_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("ALSA SoC WM9713 Zylonite");
MODULE_LICENSE("GPL");
| gpl-2.0 |
renzoolivares/android_kernel_htc_monarudo | fs/ext2/namei.c | 4766 | 9517 | /*
* linux/fs/ext2/namei.c
*
* Rewrite to pagecache. Almost all code had been changed, so blame me
* if the things go wrong. Please, send bug reports to
* viro@parcelfarce.linux.theplanet.co.uk
*
* Stuff here is basically a glue between the VFS and generic UNIXish
* filesystem that keeps everything in pagecache. All knowledge of the
* directory layout is in fs/ext2/dir.c - it turned out to be easily separatable
* and it's easier to debug that way. In principle we might want to
* generalize that a bit and turn it into a library. Or not.
*
* The only non-static object here is ext2_dir_inode_operations.
*
* TODO: get rid of kmap() use, add readahead.
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/namei.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Big-endian to little-endian byte-swapping/bitmaps by
* David S. Miller (davem@caip.rutgers.edu), 1995
*/
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
#include "xip.h"
/*
 * Hook a freshly allocated non-directory inode into the dcache under
 * dentry.  On failure the link count taken for the would-be directory
 * entry is dropped and the inode reference released.
 */
static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
{
	int err = ext2_add_link(dentry, inode);

	if (err) {
		inode_dec_link_count(inode);
		unlock_new_inode(inode);
		iput(inode);
		return err;
	}

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
	return 0;
}
/*
* Methods themselves.
*/
/* Look up dentry's name in dir; a missing name yields a negative dentry. */
static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode;
	ino_t ino;

	/* ext2 cannot store names longer than EXT2_NAME_LEN */
	if (dentry->d_name.len > EXT2_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ino = ext2_inode_by_name(dir, &dentry->d_name);
	if (!ino)
		return d_splice_alias(NULL, dentry);

	inode = ext2_iget(dir->i_sb, ino);
	if (inode == ERR_PTR(-ESTALE)) {
		/* directory entry points at a deleted inode: fs corruption */
		ext2_error(dir->i_sb, __func__,
				"deleted inode referenced: %lu",
				(unsigned long) ino);
		return ERR_PTR(-EIO);
	}
	return d_splice_alias(inode, dentry);
}
/* Return a dentry for child's parent by resolving its ".." entry. */
struct dentry *ext2_get_parent(struct dentry *child)
{
	struct qstr dotdot = { .name = "..", .len = 2 };
	unsigned long ino;

	ino = ext2_inode_by_name(child->d_inode, &dotdot);
	if (ino == 0)
		return ERR_PTR(-ENOENT);
	return d_obtain_alias(ext2_iget(child->d_inode->i_sb, ino));
}
/*
* By the time this is called, we already have created
* the directory cache entry for the new file, but it
* is so far negative - it has no inode.
*
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
/*
 * By the time this is called the directory cache entry exists but is
 * negative.  Allocate the inode, pick the address-space/file ops that
 * match the mount options, and instantiate the dentry.
 */
static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd)
{
	struct inode *inode;

	dquot_initialize(dir);

	inode = ext2_new_inode(dir, mode, &dentry->d_name);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &ext2_file_inode_operations;
	if (ext2_use_xip(inode->i_sb)) {
		/* execute-in-place: data accessed directly, no page cache */
		inode->i_mapping->a_ops = &ext2_aops_xip;
		inode->i_fop = &ext2_xip_file_operations;
	} else {
		inode->i_fop = &ext2_file_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	}
	mark_inode_dirty(inode);
	return ext2_add_nondir(dentry, inode);
}
/* Create a device node, FIFO or socket entry named by dentry in dir. */
static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	dquot_initialize(dir);

	inode = ext2_new_inode (dir, mode, &dentry->d_name);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
#ifdef CONFIG_EXT2_FS_XATTR
	inode->i_op = &ext2_special_inode_operations;
#endif
	mark_inode_dirty(inode);
	return ext2_add_nondir(dentry, inode);
}
/*
 * Create a symlink.  Short targets are stored inline in the inode
 * ("fast" symlink); anything larger goes through the page cache.
 */
static int ext2_symlink (struct inode * dir, struct dentry * dentry,
	const char * symname)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	unsigned int len = strlen(symname) + 1;	/* include the NUL */
	int err;

	/* the target (with terminator) must fit in a single block */
	if (len > sb->s_blocksize)
		return -ENAMETOOLONG;

	dquot_initialize(dir);

	inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO, &dentry->d_name);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (len > sizeof (EXT2_I(inode)->i_data)) {
		/* slow symlink: body lives in the page cache */
		inode->i_op = &ext2_symlink_inode_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
		err = page_symlink(inode, symname, len);
		if (err) {
			inode_dec_link_count(inode);
			unlock_new_inode(inode);
			iput (inode);
			return err;
		}
	} else {
		/* fast symlink: body stored directly inside the inode */
		inode->i_op = &ext2_fast_symlink_inode_operations;
		memcpy((char*)(EXT2_I(inode)->i_data), symname, len);
		inode->i_size = len - 1;
	}
	mark_inode_dirty(inode);

	return ext2_add_nondir(dentry, inode);
}
/* Create a hard link to old_dentry's inode under dir/dentry. */
static int ext2_link (struct dentry * old_dentry, struct inode * dir,
	struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int err;

	dquot_initialize(dir);

	inode->i_ctime = CURRENT_TIME_SEC;
	inode_inc_link_count(inode);
	ihold(inode);

	err = ext2_add_link(dentry, inode);
	if (err) {
		/* roll back the link count and the extra reference */
		inode_dec_link_count(inode);
		iput(inode);
		return err;
	}

	d_instantiate(dentry, inode);
	return 0;
}
/*
 * Create a directory.  The parent's link count is raised first (for the
 * child's future ".." entry) and rolled back on any failure.
 */
static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
{
	struct inode * inode;
	int err;

	dquot_initialize(dir);

	/* account for the ".." back-link the new directory will hold */
	inode_inc_link_count(dir);

	inode = ext2_new_inode(dir, S_IFDIR | mode, &dentry->d_name);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_dir;

	inode->i_op = &ext2_dir_inode_operations;
	inode->i_fop = &ext2_dir_operations;
	if (test_opt(inode->i_sb, NOBH))
		inode->i_mapping->a_ops = &ext2_nobh_aops;
	else
		inode->i_mapping->a_ops = &ext2_aops;

	/* second link: the directory's own "." entry */
	inode_inc_link_count(inode);

	err = ext2_make_empty(inode, dir);
	if (err)
		goto out_fail;

	err = ext2_add_link(dentry, inode);
	if (err)
		goto out_fail;

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);
out:
	return err;

out_fail:
	/* drop both counts taken above ("." and the parent entry) */
	inode_dec_link_count(inode);
	inode_dec_link_count(inode);
	unlock_new_inode(inode);
	iput(inode);
out_dir:
	inode_dec_link_count(dir);
	goto out;
}
/* Remove dentry's directory entry from dir and drop the inode's link. */
static int ext2_unlink(struct inode * dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct ext2_dir_entry_2 *de;
	struct page *page;
	int err;

	dquot_initialize(dir);

	de = ext2_find_entry (dir, &dentry->d_name, &page);
	if (!de)
		return -ENOENT;

	err = ext2_delete_entry (de, page);
	if (err)
		return err;

	inode->i_ctime = dir->i_ctime;
	inode_dec_link_count(inode);
	return 0;
}
/* Remove an empty directory: unlink it, then drop "." and ".." counts. */
static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err;

	if (!ext2_empty_dir(inode))
		return -ENOTEMPTY;

	err = ext2_unlink(dir, dentry);
	if (err)
		return err;

	inode->i_size = 0;
	inode_dec_link_count(inode);	/* its own "." */
	inode_dec_link_count(dir);	/* its ".." back-link */
	return 0;
}
/*
 * Rename old_dir/old_dentry to new_dir/new_dentry, replacing any existing
 * target.  Ordering: write/overwrite the new entry first, then delete the
 * old one; for directories the ".." entry and both parents' link counts
 * are adjusted as well.
 */
static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
	struct inode * new_dir,	struct dentry * new_dentry )
{
	struct inode * old_inode = old_dentry->d_inode;
	struct inode * new_inode = new_dentry->d_inode;
	struct page * dir_page = NULL;		/* page holding old_inode's ".." */
	struct ext2_dir_entry_2 * dir_de = NULL; /* ".." entry, directories only */
	struct page * old_page;
	struct ext2_dir_entry_2 * old_de;
	int err = -ENOENT;

	dquot_initialize(old_dir);
	dquot_initialize(new_dir);

	old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page);
	if (!old_de)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		dir_de = ext2_dotdot(old_inode, &dir_page);
		if (!dir_de)
			goto out_old;
	}

	if (new_inode) {
		struct page *new_page;
		struct ext2_dir_entry_2 *new_de;

		/* a directory may only replace an empty directory */
		err = -ENOTEMPTY;
		if (dir_de && !ext2_empty_dir (new_inode))
			goto out_dir;

		err = -ENOENT;
		new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
		if (!new_de)
			goto out_dir;
		/* repoint the existing entry at old_inode */
		ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
		new_inode->i_ctime = CURRENT_TIME_SEC;
		if (dir_de)
			drop_nlink(new_inode);	/* loses its ".." back-link */
		inode_dec_link_count(new_inode);
	} else {
		err = ext2_add_link(new_dentry, old_inode);
		if (err)
			goto out_dir;
		if (dir_de)
			inode_inc_link_count(new_dir);	/* gains a ".." back-link */
	}

	/*
	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
	 */
	old_inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(old_inode);

	ext2_delete_entry (old_de, old_page);

	if (dir_de) {
		if (old_dir != new_dir)
			/* rewrite ".." so it points at the new parent */
			ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
		else {
			kunmap(dir_page);
			page_cache_release(dir_page);
		}
		inode_dec_link_count(old_dir);
	}
	return 0;


out_dir:
	if (dir_de) {
		kunmap(dir_page);
		page_cache_release(dir_page);
	}
out_old:
	kunmap(old_page);
	page_cache_release(old_page);
out:
	return err;
}
/* Inode operations for ext2 directories */
const struct inode_operations ext2_dir_inode_operations = {
	.create		= ext2_create,
	.lookup		= ext2_lookup,
	.link		= ext2_link,
	.unlink		= ext2_unlink,
	.symlink	= ext2_symlink,
	.mkdir		= ext2_mkdir,
	.rmdir		= ext2_rmdir,
	.mknod		= ext2_mknod,
	.rename		= ext2_rename,
#ifdef CONFIG_EXT2_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext2_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.setattr	= ext2_setattr,
	.get_acl	= ext2_get_acl,
};
/* Inode operations for special files (device nodes, FIFOs, sockets) */
const struct inode_operations ext2_special_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext2_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.setattr	= ext2_setattr,
	.get_acl	= ext2_get_acl,
};
| gpl-2.0 |
Redmi-dev/android_kernel_xiaomi_msm8226 | drivers/staging/speakup/speakup_keypc.c | 7582 | 8664 | /*
* written by David Borowski
*
* Copyright (C) 2003 David Borowski.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* specificly written as a driver for the speakup screenreview
* package it's not a general device driver.
* This driver is for the Keynote Gold internal synthesizer.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/serial_reg.h>
#include "spk_priv.h"
#include "speakup.h"
#define DRV_VERSION "2.10"
#define SYNTH_IO_EXTENT	0x04	/* number of I/O ports the card occupies */
#define SWAIT udelay(70)	/* settle delay between port accesses */
#define PROCSPEECH 0x1f		/* command byte: speak the buffered text */
#define SYNTH_CLEAR 0x03	/* command byte: flush the synthesizer */

/* forward declarations for the spk_synth callbacks defined below */
static int synth_probe(struct spk_synth *synth);
static void keynote_release(void);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);

static int synth_port;		/* detected/forced base I/O port */
static int port_forced;		/* nonzero when the port came from the user */
static unsigned int synth_portlist[] = { 0x2a8, 0 };

/* user-tunable synth parameters; the strings are Keynote control codes */
static struct var_t vars[] = {
	{ CAPS_START, .u.s = {"[f130]" } },
	{ CAPS_STOP, .u.s = {"[f90]" } },
	{ RATE, .u.n = {"\04%c ", 8, 0, 10, 81, -8, NULL } },
	{ PITCH, .u.n = {"[f%d]", 5, 0, 9, 40, 10, NULL } },
	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};
/*
 * These attributes will appear in /sys/accessibility/speakup/keypc.
 * USER_RW entries are tunable by the user; ROOT_W timing knobs are
 * writable by root only.
 */
static struct kobj_attribute caps_start_attribute =
	__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
	__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
	__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
	__ATTR(rate, USER_RW, spk_var_show, spk_var_store);

static struct kobj_attribute delay_time_attribute =
	__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
	__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
	__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
	__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
	__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);

/*
 * Create a group of attributes so that we can create and destroy them all
 * at once.
 */
static struct attribute *synth_attrs[] = {
	&caps_start_attribute.attr,
	&caps_stop_attribute.attr,
	&pitch_attribute.attr,
	&rate_attribute.attr,
	&delay_time_attribute.attr,
	&direct_attribute.attr,
	&full_time_attribute.attr,
	&jiffy_delta_attribute.attr,
	&trigger_time_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};
/*
 * Driver descriptor registered with the speakup core via synth_add().
 * The function pointers below are the hooks the core calls to probe,
 * speak through, flush, and release this synthesizer.
 */
static struct spk_synth synth_keypc = {
	.name = "keypc",
	.version = DRV_VERSION,
	.long_name = "Keynote PC",
	.init = "[t][n7,1][n8,0]",	/* init string sent to the synth at startup */
	.procspeech = PROCSPEECH,
	.clear = SYNTH_CLEAR,
	/* timing parameters consumed by the speakup core -- units are
	 * defined there (TODO confirm: ms vs jiffies) */
	.delay = 500,
	.trigger = 50,
	.jiffies = 50,
	.full = 1000,
	.startup = SYNTH_START,
	.checkval = SYNTH_CHECK,
	.vars = vars,
	.probe = synth_probe,
	.release = keynote_release,
	.synth_immediate = synth_immediate,
	.catch_up = do_catch_up,
	.flush = synth_flush,
	.is_alive = spk_synth_is_alive_nop,
	/* optional hooks this hardware does not provide */
	.synth_adjust = NULL,
	.read_buff_add = NULL,
	.get_index = NULL,
	.indexing = {
		.command = NULL,	/* no index-marker support */
		.lowindex = 0,
		.highindex = 0,
		.currindex = 0,
	},
	.attributes = {
		.attrs = synth_attrs,
		.name = "keypc",	/* sysfs directory name */
	},
};
/*
 * Read the synth status port and report whether bit 0x10 is set.
 * Callers spin while this returns true before sending the next byte,
 * i.e. the bit appears to mean "previous write still pending" --
 * NOTE(review): exact hardware semantics not documented here.
 */
static inline bool synth_writable(void)
{
	unsigned char status = inb_p(synth_port + UART_RX);

	return (status & 0x10) != 0;
}
/*
 * Report whether the synthesizer cannot accept more data: bit 0x80 of
 * the status port is clear when the synth is full.
 */
static inline bool synth_full(void)
{
	unsigned char status = inb_p(synth_port + UART_RX);

	return !(status & 0x80);
}
/*
 * Log the four synth status ports after an I/O timeout, for diagnosis.
 * Always returns NULL so callers can "return oops();" on failure.
 */
static char *oops(void)
{
	int status[4];
	int idx;

	for (idx = 0; idx < 4; idx++)
		status[idx] = inb_p(synth_port + idx);
	pr_warn("synth timeout %d %d %d %d\n",
		status[0], status[1], status[2], status[3]);
	return NULL;
}
static const char *synth_immediate(struct spk_synth *synth, const char *buf)
{
u_char ch;
int timeout;
while ((ch = *buf)) {
if (ch == '\n')
ch = PROCSPEECH;
if (synth_full())
return buf;
timeout = 1000;
while (synth_writable())
if (--timeout <= 0)
return oops();
outb_p(ch, synth_port);
udelay(70);
buf++;
}
return 0;
}
/*
 * Kthread callback: drain the speakup output buffer to the synth.
 * Runs until the buffer is empty, the thread is asked to stop, or a
 * port-write timeout occurs.  speakup_info and the text buffer are
 * shared with the rest of speakup, so each access to them is wrapped
 * in spk_lock()/spk_unlock(); the port I/O itself is done unlocked.
 */
static void do_catch_up(struct spk_synth *synth)
{
	u_char ch;
	int timeout;
	unsigned long flags;
	unsigned long jiff_max;	/* deadline after which we pause at a space */
	struct var_t *jiffy_delta;
	struct var_t *delay_time;
	struct var_t *full_time;
	int delay_time_val;
	int full_time_val;
	int jiffy_delta_val;
	jiffy_delta = get_var(JIFFY);
	delay_time = get_var(DELAY);
	full_time = get_var(FULL);
	/* snapshot the tunable under the lock; it can change via sysfs */
	spk_lock(flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spk_unlock(flags);
	jiff_max = jiffies + jiffy_delta_val;
	while (!kthread_should_stop()) {
		spk_lock(flags);
		if (speakup_info.flushing) {
			/* a flush request takes priority over queued text */
			speakup_info.flushing = 0;
			spk_unlock(flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spk_unlock(flags);
			break;
		}
		/* set the task state before dropping the lock so a wakeup
		 * arriving before schedule_timeout() is not lost */
		set_current_state(TASK_INTERRUPTIBLE);
		full_time_val = full_time->u.n.value;
		spk_unlock(flags);
		if (synth_full()) {
			/* synth reports full: sleep, then retry this byte */
			schedule_timeout(msecs_to_jiffies(full_time_val));
			continue;
		}
		set_current_state(TASK_RUNNING);
		/* bounded busy-wait for the port to accept a byte */
		timeout = 1000;
		while (synth_writable())
			if (--timeout <= 0)
				break;
		if (timeout <= 0) {
			oops();
			break;
		}
		spk_lock(flags);
		ch = synth_buffer_getc();
		spk_unlock(flags);
		if (ch == '\n')
			ch = PROCSPEECH;	/* newline triggers speech */
		outb_p(ch, synth_port);
		SWAIT;
		/* once past the deadline, pause at a word boundary: start
		 * the speech and sleep so we don't monopolize the CPU */
		if ((jiffies >= jiff_max) && (ch == SPACE)) {
			timeout = 1000;
			while (synth_writable())
				if (--timeout <= 0)
					break;
			if (timeout <= 0) {
				oops();
				break;
			}
			outb_p(PROCSPEECH, synth_port);
			/* re-read tunables; they may have changed via sysfs */
			spk_lock(flags);
			jiffy_delta_val = jiffy_delta->u.n.value;
			delay_time_val = delay_time->u.n.value;
			spk_unlock(flags);
			schedule_timeout(msecs_to_jiffies(delay_time_val));
			jiff_max = jiffies+jiffy_delta_val;
		}
	}
	/* make the synth speak whatever was sent last */
	timeout = 1000;
	while (synth_writable())
		if (--timeout <= 0)
			break;
	if (timeout <= 0)
		oops();
	else
		outb_p(PROCSPEECH, synth_port);
}
/* Discard pending speech by sending the synth's clear command byte. */
static void synth_flush(struct spk_synth *synth)
{
	outb_p(SYNTH_CLEAR, synth_port);
}
/*
 * Locate the synthesizer, either at the port forced on the kernel
 * command line ("port=" module parameter) or by probing the known port
 * list.  A present synth answers 0x80 on its status register.
 *
 * Returns 0 on success, -EBUSY when the forced port's I/O region is
 * already reserved, -ENODEV when no synth is found.
 *
 * Fixes vs. the original: a probed port that did not answer 0x80 kept
 * its requested I/O region forever (leak), and the not-found cleanup
 * released a region at the wrong base (synth_port instead of the
 * synth_port-1 actually requested in the forced path, or port 0 in the
 * probe path where nothing was held).
 */
static int synth_probe(struct spk_synth *synth)
{
	unsigned int port_val = 0;
	int i = 0;

	pr_info("Probing for %s.\n", synth->long_name);
	if (port_forced) {
		synth_port = port_forced;
		pr_info("probe forced to %x by kernel command line\n",
				synth_port);
		/* NOTE(review): region base is synth_port-1; presumably
		 * intentional for this hardware -- kept as-is */
		if (synth_request_region(synth_port-1, SYNTH_IO_EXTENT)) {
			pr_warn("sorry, port already reserved\n");
			return -EBUSY;
		}
		port_val = inb(synth_port);
		if (port_val != 0x80) {
			pr_info("%s: not found\n", synth->long_name);
			/* release the region actually requested above */
			synth_release_region(synth_port-1, SYNTH_IO_EXTENT);
			synth_port = 0;
			return -ENODEV;
		}
	} else {
		for (i = 0; synth_portlist[i]; i++) {
			if (synth_request_region(synth_portlist[i],
						SYNTH_IO_EXTENT)) {
				pr_warn
				    ("request_region: failed with 0x%x, %d\n",
				     synth_portlist[i], SYNTH_IO_EXTENT);
				continue;
			}
			port_val = inb(synth_portlist[i]);
			if (port_val == 0x80) {
				synth_port = synth_portlist[i];
				break;
			}
			/* no synth here: drop the region before trying the
			 * next port, otherwise it would be leaked */
			synth_release_region(synth_portlist[i],
					SYNTH_IO_EXTENT);
		}
		if (port_val != 0x80) {
			/* nothing held here: every probed region was
			 * released in the loop above */
			pr_info("%s: not found\n", synth->long_name);
			return -ENODEV;
		}
	}
	pr_info("%s: %03x-%03x, driver version %s,\n", synth->long_name,
		synth_port, synth_port+SYNTH_IO_EXTENT-1,
		synth->version);
	synth->alive = 1;
	return 0;
}
/*
 * Give back the synthesizer's I/O region, if one is owned, and mark
 * the port as unclaimed.
 */
static void keynote_release(void)
{
	int port = synth_port;

	synth_port = 0;
	if (port)
		synth_release_region(port, SYNTH_IO_EXTENT);
}
/* "port": override probing with a fixed base port (see synth_probe()) */
module_param_named(port, port_forced, int, S_IRUGO);
/* "start": start the synthesizer as soon as the module is loaded */
module_param_named(start, synth_keypc.startup, short, S_IRUGO);
MODULE_PARM_DESC(port, "Set the port for the synthesizer (override probing).");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
/* Register this driver with the speakup core. */
static int __init keypc_init(void)
{
	return synth_add(&synth_keypc);
}
/* Unregister from the speakup core; the core calls keynote_release(). */
static void __exit keypc_exit(void)
{
	synth_remove(&synth_keypc);
}
module_init(keypc_init);
module_exit(keypc_exit);
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for Keynote Gold PC synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.