repo_name
string
path
string
copies
string
size
string
content
string
license
string
fransklaver/linux
fs/btrfs/props.c
610
9893
/* * Copyright (C) 2014 Filipe David Borba Manana <fdmanana@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/hashtable.h> #include "props.h" #include "btrfs_inode.h" #include "hash.h" #include "transaction.h" #include "xattr.h" #define BTRFS_PROP_HANDLERS_HT_BITS 8 static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS); struct prop_handler { struct hlist_node node; const char *xattr_name; int (*validate)(const char *value, size_t len); int (*apply)(struct inode *inode, const char *value, size_t len); const char *(*extract)(struct inode *inode); int inheritable; }; static int prop_compression_validate(const char *value, size_t len); static int prop_compression_apply(struct inode *inode, const char *value, size_t len); static const char *prop_compression_extract(struct inode *inode); static struct prop_handler prop_handlers[] = { { .xattr_name = XATTR_BTRFS_PREFIX "compression", .validate = prop_compression_validate, .apply = prop_compression_apply, .extract = prop_compression_extract, .inheritable = 1 }, { .xattr_name = NULL } }; void __init btrfs_props_init(void) { struct prop_handler *p; hash_init(prop_handlers_ht); for (p = &prop_handlers[0]; p->xattr_name; p++) { u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name)); hash_add(prop_handlers_ht, &p->node, h); } } static const struct hlist_head *find_prop_handlers_by_hash(const u64 hash) 
{ struct hlist_head *h; h = &prop_handlers_ht[hash_min(hash, BTRFS_PROP_HANDLERS_HT_BITS)]; if (hlist_empty(h)) return NULL; return h; } static const struct prop_handler * find_prop_handler(const char *name, const struct hlist_head *handlers) { struct prop_handler *h; if (!handlers) { u64 hash = btrfs_name_hash(name, strlen(name)); handlers = find_prop_handlers_by_hash(hash); if (!handlers) return NULL; } hlist_for_each_entry(h, handlers, node) if (!strcmp(h->xattr_name, name)) return h; return NULL; } static int __btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const char *value, size_t value_len, int flags) { const struct prop_handler *handler; int ret; if (strlen(name) <= XATTR_BTRFS_PREFIX_LEN) return -EINVAL; handler = find_prop_handler(name, NULL); if (!handler) return -EINVAL; if (value_len == 0) { ret = __btrfs_setxattr(trans, inode, handler->xattr_name, NULL, 0, flags); if (ret) return ret; ret = handler->apply(inode, NULL, 0); ASSERT(ret == 0); return ret; } ret = handler->validate(value, value_len); if (ret) return ret; ret = __btrfs_setxattr(trans, inode, handler->xattr_name, value, value_len, flags); if (ret) return ret; ret = handler->apply(inode, value, value_len); if (ret) { __btrfs_setxattr(trans, inode, handler->xattr_name, NULL, 0, flags); return ret; } set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags); return 0; } int btrfs_set_prop(struct inode *inode, const char *name, const char *value, size_t value_len, int flags) { return __btrfs_set_prop(NULL, inode, name, value, value_len, flags); } static int iterate_object_props(struct btrfs_root *root, struct btrfs_path *path, u64 objectid, void (*iterator)(void *, const struct prop_handler *, const char *, size_t), void *ctx) { int ret; char *name_buf = NULL; char *value_buf = NULL; int name_buf_len = 0; int value_buf_len = 0; while (1) { struct btrfs_key key; struct btrfs_dir_item *di; struct extent_buffer *leaf; u32 total_len, cur, this_len; int 
slot; const struct hlist_head *handlers; slot = path->slots[0]; leaf = path->nodes[0]; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid != objectid) break; if (key.type != BTRFS_XATTR_ITEM_KEY) break; handlers = find_prop_handlers_by_hash(key.offset); if (!handlers) goto next_slot; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); cur = 0; total_len = btrfs_item_size_nr(leaf, slot); while (cur < total_len) { u32 name_len = btrfs_dir_name_len(leaf, di); u32 data_len = btrfs_dir_data_len(leaf, di); unsigned long name_ptr, data_ptr; const struct prop_handler *handler; this_len = sizeof(*di) + name_len + data_len; name_ptr = (unsigned long)(di + 1); data_ptr = name_ptr + name_len; if (name_len <= XATTR_BTRFS_PREFIX_LEN || memcmp_extent_buffer(leaf, XATTR_BTRFS_PREFIX, name_ptr, XATTR_BTRFS_PREFIX_LEN)) goto next_dir_item; if (name_len >= name_buf_len) { kfree(name_buf); name_buf_len = name_len + 1; name_buf = kmalloc(name_buf_len, GFP_NOFS); if (!name_buf) { ret = -ENOMEM; goto out; } } read_extent_buffer(leaf, name_buf, name_ptr, name_len); name_buf[name_len] = '\0'; handler = find_prop_handler(name_buf, handlers); if (!handler) goto next_dir_item; if (data_len > value_buf_len) { kfree(value_buf); value_buf_len = data_len; value_buf = kmalloc(data_len, GFP_NOFS); if (!value_buf) { ret = -ENOMEM; goto out; } } read_extent_buffer(leaf, value_buf, data_ptr, data_len); iterator(ctx, handler, value_buf, data_len); next_dir_item: cur += this_len; di = (struct btrfs_dir_item *)((char *) di + this_len); } next_slot: path->slots[0]++; } ret = 0; out: btrfs_release_path(path); kfree(name_buf); kfree(value_buf); return ret; } static void inode_prop_iterator(void *ctx, const struct prop_handler *handler, const char *value, size_t len) { struct inode *inode = ctx; struct btrfs_root *root = BTRFS_I(inode)->root; int ret; ret = 
handler->apply(inode, value, len); if (unlikely(ret)) btrfs_warn(root->fs_info, "error applying prop %s to ino %llu (root %llu): %d", handler->xattr_name, btrfs_ino(inode), root->root_key.objectid, ret); else set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags); } int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 ino = btrfs_ino(inode); int ret; ret = iterate_object_props(root, path, ino, inode_prop_iterator, inode); return ret; } static int inherit_props(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *parent) { const struct prop_handler *h; struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (!test_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(parent)->runtime_flags)) return 0; for (h = &prop_handlers[0]; h->xattr_name; h++) { const char *value; u64 num_bytes; if (!h->inheritable) continue; value = h->extract(parent); if (!value) continue; num_bytes = btrfs_calc_trans_metadata_size(root, 1); ret = btrfs_block_rsv_add(root, trans->block_rsv, num_bytes, BTRFS_RESERVE_NO_FLUSH); if (ret) goto out; ret = __btrfs_set_prop(trans, inode, h->xattr_name, value, strlen(value), 0); btrfs_block_rsv_release(root, trans->block_rsv, num_bytes); if (ret) goto out; } ret = 0; out: return ret; } int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *dir) { if (!dir) return 0; return inherit_props(trans, inode, dir); } int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *parent_root) { struct btrfs_key key; struct inode *parent_inode, *child_inode; int ret; key.objectid = BTRFS_FIRST_FREE_OBJECTID; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; parent_inode = btrfs_iget(parent_root->fs_info->sb, &key, parent_root, NULL); if (IS_ERR(parent_inode)) return PTR_ERR(parent_inode); child_inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); if (IS_ERR(child_inode)) { 
iput(parent_inode); return PTR_ERR(child_inode); } ret = inherit_props(trans, child_inode, parent_inode); iput(child_inode); iput(parent_inode); return ret; } static int prop_compression_validate(const char *value, size_t len) { if (!strncmp("lzo", value, len)) return 0; else if (!strncmp("zlib", value, len)) return 0; return -EINVAL; } static int prop_compression_apply(struct inode *inode, const char *value, size_t len) { int type; if (len == 0) { BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE; return 0; } if (!strncmp("lzo", value, len)) type = BTRFS_COMPRESS_LZO; else if (!strncmp("zlib", value, len)) type = BTRFS_COMPRESS_ZLIB; else return -EINVAL; BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; BTRFS_I(inode)->force_compress = type; return 0; } static const char *prop_compression_extract(struct inode *inode) { switch (BTRFS_I(inode)->force_compress) { case BTRFS_COMPRESS_ZLIB: return "zlib"; case BTRFS_COMPRESS_LZO: return "lzo"; } return NULL; }
gpl-2.0
iamroot12D/linux
fs/btrfs/props.c
610
9893
/* * Copyright (C) 2014 Filipe David Borba Manana <fdmanana@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/hashtable.h> #include "props.h" #include "btrfs_inode.h" #include "hash.h" #include "transaction.h" #include "xattr.h" #define BTRFS_PROP_HANDLERS_HT_BITS 8 static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS); struct prop_handler { struct hlist_node node; const char *xattr_name; int (*validate)(const char *value, size_t len); int (*apply)(struct inode *inode, const char *value, size_t len); const char *(*extract)(struct inode *inode); int inheritable; }; static int prop_compression_validate(const char *value, size_t len); static int prop_compression_apply(struct inode *inode, const char *value, size_t len); static const char *prop_compression_extract(struct inode *inode); static struct prop_handler prop_handlers[] = { { .xattr_name = XATTR_BTRFS_PREFIX "compression", .validate = prop_compression_validate, .apply = prop_compression_apply, .extract = prop_compression_extract, .inheritable = 1 }, { .xattr_name = NULL } }; void __init btrfs_props_init(void) { struct prop_handler *p; hash_init(prop_handlers_ht); for (p = &prop_handlers[0]; p->xattr_name; p++) { u64 h = btrfs_name_hash(p->xattr_name, strlen(p->xattr_name)); hash_add(prop_handlers_ht, &p->node, h); } } static const struct hlist_head *find_prop_handlers_by_hash(const u64 hash) 
{ struct hlist_head *h; h = &prop_handlers_ht[hash_min(hash, BTRFS_PROP_HANDLERS_HT_BITS)]; if (hlist_empty(h)) return NULL; return h; } static const struct prop_handler * find_prop_handler(const char *name, const struct hlist_head *handlers) { struct prop_handler *h; if (!handlers) { u64 hash = btrfs_name_hash(name, strlen(name)); handlers = find_prop_handlers_by_hash(hash); if (!handlers) return NULL; } hlist_for_each_entry(h, handlers, node) if (!strcmp(h->xattr_name, name)) return h; return NULL; } static int __btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const char *value, size_t value_len, int flags) { const struct prop_handler *handler; int ret; if (strlen(name) <= XATTR_BTRFS_PREFIX_LEN) return -EINVAL; handler = find_prop_handler(name, NULL); if (!handler) return -EINVAL; if (value_len == 0) { ret = __btrfs_setxattr(trans, inode, handler->xattr_name, NULL, 0, flags); if (ret) return ret; ret = handler->apply(inode, NULL, 0); ASSERT(ret == 0); return ret; } ret = handler->validate(value, value_len); if (ret) return ret; ret = __btrfs_setxattr(trans, inode, handler->xattr_name, value, value_len, flags); if (ret) return ret; ret = handler->apply(inode, value, value_len); if (ret) { __btrfs_setxattr(trans, inode, handler->xattr_name, NULL, 0, flags); return ret; } set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags); return 0; } int btrfs_set_prop(struct inode *inode, const char *name, const char *value, size_t value_len, int flags) { return __btrfs_set_prop(NULL, inode, name, value, value_len, flags); } static int iterate_object_props(struct btrfs_root *root, struct btrfs_path *path, u64 objectid, void (*iterator)(void *, const struct prop_handler *, const char *, size_t), void *ctx) { int ret; char *name_buf = NULL; char *value_buf = NULL; int name_buf_len = 0; int value_buf_len = 0; while (1) { struct btrfs_key key; struct btrfs_dir_item *di; struct extent_buffer *leaf; u32 total_len, cur, this_len; int 
slot; const struct hlist_head *handlers; slot = path->slots[0]; leaf = path->nodes[0]; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid != objectid) break; if (key.type != BTRFS_XATTR_ITEM_KEY) break; handlers = find_prop_handlers_by_hash(key.offset); if (!handlers) goto next_slot; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); cur = 0; total_len = btrfs_item_size_nr(leaf, slot); while (cur < total_len) { u32 name_len = btrfs_dir_name_len(leaf, di); u32 data_len = btrfs_dir_data_len(leaf, di); unsigned long name_ptr, data_ptr; const struct prop_handler *handler; this_len = sizeof(*di) + name_len + data_len; name_ptr = (unsigned long)(di + 1); data_ptr = name_ptr + name_len; if (name_len <= XATTR_BTRFS_PREFIX_LEN || memcmp_extent_buffer(leaf, XATTR_BTRFS_PREFIX, name_ptr, XATTR_BTRFS_PREFIX_LEN)) goto next_dir_item; if (name_len >= name_buf_len) { kfree(name_buf); name_buf_len = name_len + 1; name_buf = kmalloc(name_buf_len, GFP_NOFS); if (!name_buf) { ret = -ENOMEM; goto out; } } read_extent_buffer(leaf, name_buf, name_ptr, name_len); name_buf[name_len] = '\0'; handler = find_prop_handler(name_buf, handlers); if (!handler) goto next_dir_item; if (data_len > value_buf_len) { kfree(value_buf); value_buf_len = data_len; value_buf = kmalloc(data_len, GFP_NOFS); if (!value_buf) { ret = -ENOMEM; goto out; } } read_extent_buffer(leaf, value_buf, data_ptr, data_len); iterator(ctx, handler, value_buf, data_len); next_dir_item: cur += this_len; di = (struct btrfs_dir_item *)((char *) di + this_len); } next_slot: path->slots[0]++; } ret = 0; out: btrfs_release_path(path); kfree(name_buf); kfree(value_buf); return ret; } static void inode_prop_iterator(void *ctx, const struct prop_handler *handler, const char *value, size_t len) { struct inode *inode = ctx; struct btrfs_root *root = BTRFS_I(inode)->root; int ret; ret = 
handler->apply(inode, value, len); if (unlikely(ret)) btrfs_warn(root->fs_info, "error applying prop %s to ino %llu (root %llu): %d", handler->xattr_name, btrfs_ino(inode), root->root_key.objectid, ret); else set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags); } int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 ino = btrfs_ino(inode); int ret; ret = iterate_object_props(root, path, ino, inode_prop_iterator, inode); return ret; } static int inherit_props(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *parent) { const struct prop_handler *h; struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (!test_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(parent)->runtime_flags)) return 0; for (h = &prop_handlers[0]; h->xattr_name; h++) { const char *value; u64 num_bytes; if (!h->inheritable) continue; value = h->extract(parent); if (!value) continue; num_bytes = btrfs_calc_trans_metadata_size(root, 1); ret = btrfs_block_rsv_add(root, trans->block_rsv, num_bytes, BTRFS_RESERVE_NO_FLUSH); if (ret) goto out; ret = __btrfs_set_prop(trans, inode, h->xattr_name, value, strlen(value), 0); btrfs_block_rsv_release(root, trans->block_rsv, num_bytes); if (ret) goto out; } ret = 0; out: return ret; } int btrfs_inode_inherit_props(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *dir) { if (!dir) return 0; return inherit_props(trans, inode, dir); } int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_root *parent_root) { struct btrfs_key key; struct inode *parent_inode, *child_inode; int ret; key.objectid = BTRFS_FIRST_FREE_OBJECTID; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; parent_inode = btrfs_iget(parent_root->fs_info->sb, &key, parent_root, NULL); if (IS_ERR(parent_inode)) return PTR_ERR(parent_inode); child_inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); if (IS_ERR(child_inode)) { 
iput(parent_inode); return PTR_ERR(child_inode); } ret = inherit_props(trans, child_inode, parent_inode); iput(child_inode); iput(parent_inode); return ret; } static int prop_compression_validate(const char *value, size_t len) { if (!strncmp("lzo", value, len)) return 0; else if (!strncmp("zlib", value, len)) return 0; return -EINVAL; } static int prop_compression_apply(struct inode *inode, const char *value, size_t len) { int type; if (len == 0) { BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE; return 0; } if (!strncmp("lzo", value, len)) type = BTRFS_COMPRESS_LZO; else if (!strncmp("zlib", value, len)) type = BTRFS_COMPRESS_ZLIB; else return -EINVAL; BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; BTRFS_I(inode)->force_compress = type; return 0; } static const char *prop_compression_extract(struct inode *inode) { switch (BTRFS_I(inode)->force_compress) { case BTRFS_COMPRESS_ZLIB: return "zlib"; case BTRFS_COMPRESS_LZO: return "lzo"; } return NULL; }
gpl-2.0
PatrikKT/android_kernel_htc_a31ul
drivers/staging/cxt1e1/linux.c
2146
34108
/* Copyright (C) 2007-2008 One Stop Systems * Copyright (C) 2003-2006 SBE, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/hdlc.h> #include <linux/if_arp.h> #include <linux/init.h> #include <asm/uaccess.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include "pmcc4_sysdep.h" #include "sbecom_inline_linux.h" #include "libsbew.h" #include "pmcc4.h" #include "pmcc4_ioctls.h" #include "pmcc4_private.h" #include "sbeproc.h" /***************************************************************************************** * Error out early if we have compiler trouble. * * (This section is included from the kernel's init/main.c as a friendly * spiderman recommendation...) * * Versions of gcc older than that listed below may actually compile and link * okay, but the end product can have subtle run time bugs. To avoid associated * bogus bug reports, we flatly refuse to compile with a gcc that is known to be * too old from the very beginning. */ #if (__GNUC__ < 3) || (__GNUC__ == 3 && __GNUC_MINOR__ < 2) #error Sorry, your GCC is too old. It builds incorrect kernels. #endif #if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0 #warning gcc-4.1.0 is known to miscompile the kernel. A different compiler version is recommended. 
#endif /*****************************************************************************************/ #ifdef SBE_INCLUDE_SYMBOLS #define STATIC #else #define STATIC static #endif #define CHANNAME "hdlc" /*******************************************************************/ /* forward references */ status_t c4_chan_work_init (mpi_t *, mch_t *); void musycc_wq_chan_restart (void *); status_t __init c4_init (ci_t *, u_char *, u_char *); status_t __init c4_init2 (ci_t *); ci_t *__init c4_new (void *); int __init c4hw_attach_all (void); void __init hdw_sn_get (hdw_info_t *, int); #ifdef CONFIG_SBE_PMCC4_NCOMM irqreturn_t c4_ebus_intr_th_handler (void *); #endif int c4_frame_rw (ci_t *, struct sbecom_port_param *); status_t c4_get_port (ci_t *, int); int c4_loop_port (ci_t *, int, u_int8_t); int c4_musycc_rw (ci_t *, struct c4_musycc_param *); int c4_new_chan (ci_t *, int, int, void *); status_t c4_set_port (ci_t *, int); int c4_pld_rw (ci_t *, struct sbecom_port_param *); void cleanup_devs (void); void cleanup_ioremap (void); status_t musycc_chan_down (ci_t *, int); irqreturn_t musycc_intr_th_handler (void *); int musycc_start_xmit (ci_t *, int, void *); extern char pmcc4_OSSI_release[]; extern ci_t *CI; extern struct s_hdw_info hdw_info[]; #if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \ defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE) #define _v7_hdlc_ 1 #else #define _v7_hdlc_ 0 #endif #if _v7_hdlc_ #define V7(x) (x ## _v7) extern int hdlc_netif_rx_v7 (hdlc_device *, struct sk_buff *); extern int register_hdlc_device_v7 (hdlc_device *); extern int unregister_hdlc_device_v7 (hdlc_device *); #else #define V7(x) x #endif int error_flag; /* module load error reporting */ int cxt1e1_log_level = LOG_ERROR; int log_level_default = LOG_ERROR; module_param(cxt1e1_log_level, int, 0444); int cxt1e1_max_mru = MUSYCC_MRU; int max_mru_default = MUSYCC_MRU; module_param(cxt1e1_max_mru, int, 0444); int cxt1e1_max_mtu = 
MUSYCC_MTU; int max_mtu_default = MUSYCC_MTU; module_param(cxt1e1_max_mtu, int, 0444); int max_txdesc_used = MUSYCC_TXDESC_MIN; int max_txdesc_default = MUSYCC_TXDESC_MIN; module_param(max_txdesc_used, int, 0444); int max_rxdesc_used = MUSYCC_RXDESC_MIN; int max_rxdesc_default = MUSYCC_RXDESC_MIN; module_param(max_rxdesc_used, int, 0444); /****************************************************************************/ /****************************************************************************/ /****************************************************************************/ void * getuserbychan (int channum) { mch_t *ch; ch = c4_find_chan (channum); return ch ? ch->user : 0; } char * get_hdlc_name (hdlc_device * hdlc) { struct c4_priv *priv = hdlc->priv; struct net_device *dev = getuserbychan (priv->channum); return dev->name; } static status_t mkret (int bsd) { if (bsd > 0) return -bsd; else return bsd; } /***************************************************************************/ #include <linux/workqueue.h> /*** * One workqueue (wq) per port (since musycc allows simultaneous group * commands), with individual data for each channel: * * mpi_t -> struct workqueue_struct *wq_port; (dynamically allocated using * create_workqueue()) * * With work structure (work) statically allocated for each channel: * * mch_t -> struct work_struct ch_work; (statically allocated using ???) * ***/ /* * Called by the start transmit routine when a channel TX_ENABLE is to be * issued. This queues the transmission start request among other channels * within a port's group. 
*/ void c4_wk_chan_restart (mch_t * ch) { mpi_t *pi = ch->up; #ifdef RLD_RESTART_DEBUG pr_info(">> %s: queueing Port %d Chan %d, mch_t @ %p\n", __func__, pi->portnum, ch->channum, ch); #endif /* create new entry w/in workqueue for this channel and let'er rip */ /** queue_work (struct workqueue_struct *queue, ** struct work_struct *work); **/ queue_work (pi->wq_port, &ch->ch_work); } status_t c4_wk_chan_init (mpi_t * pi, mch_t * ch) { /* * this will be used to restart a stopped channel */ /** INIT_WORK (struct work_struct *work, ** void (*function)(void *), ** void *data); **/ INIT_WORK(&ch->ch_work, (void *)musycc_wq_chan_restart); return 0; /* success */ } status_t c4_wq_port_init (mpi_t * pi) { char name[16], *np; /* NOTE: name of the queue limited by system * to 10 characters */ if (pi->wq_port) return 0; /* already initialized */ np = name; memset (name, 0, 16); sprintf (np, "%s%d", pi->up->devname, pi->portnum); /* IE pmcc4-01) */ #ifdef RLD_RESTART_DEBUG pr_info(">> %s: creating workqueue <%s> for Port %d.\n", __func__, name, pi->portnum); /* RLD DEBUG */ #endif if (!(pi->wq_port = create_singlethread_workqueue (name))) return ENOMEM; return 0; /* success */ } void c4_wq_port_cleanup (mpi_t * pi) { /* * PORT POINT: cannot call this if WQ is statically allocated w/in * structure since it calls kfree(wq); */ if (pi->wq_port) { destroy_workqueue (pi->wq_port); /* this also calls * flush_workqueue() */ pi->wq_port = 0; } } /***************************************************************************/ irqreturn_t c4_linux_interrupt (int irq, void *dev_instance) { struct net_device *ndev = dev_instance; return musycc_intr_th_handler(netdev_priv(ndev)); } #ifdef CONFIG_SBE_PMCC4_NCOMM irqreturn_t c4_ebus_interrupt (int irq, void *dev_instance) { struct net_device *ndev = dev_instance; return c4_ebus_intr_th_handler(netdev_priv(ndev)); } #endif static int void_open (struct net_device * ndev) { pr_info("%s: trying to open master device !\n", ndev->name); return -1; } 
STATIC int chan_open (struct net_device * ndev) { hdlc_device *hdlc = dev_to_hdlc (ndev); const struct c4_priv *priv = hdlc->priv; int ret; if ((ret = hdlc_open (ndev))) { pr_info("hdlc_open failure, err %d.\n", ret); return ret; } if ((ret = c4_chan_up (priv->ci, priv->channum))) return -ret; try_module_get (THIS_MODULE); netif_start_queue (ndev); return 0; /* no error = success */ } STATIC int chan_close (struct net_device * ndev) { hdlc_device *hdlc = dev_to_hdlc (ndev); const struct c4_priv *priv = hdlc->priv; netif_stop_queue (ndev); musycc_chan_down ((ci_t *) 0, priv->channum); hdlc_close (ndev); module_put (THIS_MODULE); return 0; } STATIC int chan_dev_ioctl (struct net_device * dev, struct ifreq * ifr, int cmd) { return hdlc_ioctl (dev, ifr, cmd); } STATIC int chan_attach_noop (struct net_device * ndev, unsigned short foo_1, unsigned short foo_2) { return 0; /* our driver has nothing to do here, show's * over, go home */ } STATIC struct net_device_stats * chan_get_stats (struct net_device * ndev) { mch_t *ch; struct net_device_stats *nstats; struct sbecom_chan_stats *stats; int channum; { struct c4_priv *priv; priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv; channum = priv->channum; } ch = c4_find_chan (channum); if (ch == NULL) return NULL; nstats = &ndev->stats; stats = &ch->s; memset (nstats, 0, sizeof (struct net_device_stats)); nstats->rx_packets = stats->rx_packets; nstats->tx_packets = stats->tx_packets; nstats->rx_bytes = stats->rx_bytes; nstats->tx_bytes = stats->tx_bytes; nstats->rx_errors = stats->rx_length_errors + stats->rx_over_errors + stats->rx_crc_errors + stats->rx_frame_errors + stats->rx_fifo_errors + stats->rx_missed_errors; nstats->tx_errors = stats->tx_dropped + stats->tx_aborted_errors + stats->tx_fifo_errors; nstats->rx_dropped = stats->rx_dropped; nstats->tx_dropped = stats->tx_dropped; nstats->rx_length_errors = stats->rx_length_errors; nstats->rx_over_errors = stats->rx_over_errors; nstats->rx_crc_errors = 
stats->rx_crc_errors; nstats->rx_frame_errors = stats->rx_frame_errors; nstats->rx_fifo_errors = stats->rx_fifo_errors; nstats->rx_missed_errors = stats->rx_missed_errors; nstats->tx_aborted_errors = stats->tx_aborted_errors; nstats->tx_fifo_errors = stats->tx_fifo_errors; return nstats; } static ci_t * get_ci_by_dev (struct net_device * ndev) { return (ci_t *)(netdev_priv(ndev)); } STATIC int c4_linux_xmit (struct sk_buff * skb, struct net_device * ndev) { const struct c4_priv *priv; int rval; hdlc_device *hdlc = dev_to_hdlc (ndev); priv = hdlc->priv; rval = musycc_start_xmit (priv->ci, priv->channum, skb); return rval; } static const struct net_device_ops chan_ops = { .ndo_open = chan_open, .ndo_stop = chan_close, .ndo_start_xmit = c4_linux_xmit, .ndo_do_ioctl = chan_dev_ioctl, .ndo_get_stats = chan_get_stats, }; STATIC struct net_device * create_chan (struct net_device * ndev, ci_t * ci, struct sbecom_chan_param * cp) { hdlc_device *hdlc; struct net_device *dev; hdw_info_t *hi; int ret; if (c4_find_chan (cp->channum)) return 0; /* channel already exists */ { struct c4_priv *priv; /* allocate then fill in private data structure */ priv = OS_kmalloc (sizeof (struct c4_priv)); if (!priv) { pr_warning("%s: no memory for net_device !\n", ci->devname); return 0; } dev = alloc_hdlcdev (priv); if (!dev) { pr_warning("%s: no memory for hdlc_device !\n", ci->devname); OS_kfree (priv); return 0; } priv->ci = ci; priv->channum = cp->channum; } hdlc = dev_to_hdlc (dev); dev->base_addr = 0; /* not I/O mapped */ dev->irq = ndev->irq; dev->type = ARPHRD_RAWHDLC; *dev->name = 0; /* default ifconfig name = "hdlc" */ hi = (hdw_info_t *) ci->hdw_info; if (hi->mfg_info_sts == EEPROM_OK) { switch (hi->promfmt) { case PROM_FORMAT_TYPE1: memcpy (dev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6); break; case PROM_FORMAT_TYPE2: memcpy (dev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6); break; default: memset (dev->dev_addr, 0, 6); break; } } else { memset 
(dev->dev_addr, 0, 6); } hdlc->xmit = c4_linux_xmit; dev->netdev_ops = &chan_ops; /* * The native hdlc stack calls this 'attach' routine during * hdlc_raw_ioctl(), passing parameters for line encoding and parity. * Since hdlc_raw_ioctl() stack does not interrogate whether an 'attach' * routine is actually registered or not, we supply a dummy routine which * does nothing (since encoding and parity are setup for our driver via a * special configuration application). */ hdlc->attach = chan_attach_noop; rtnl_unlock (); /* needed due to Ioctl calling sequence */ ret = register_hdlc_device (dev); /* NOTE: <stats> setting must occur AFTER registration in order to "take" */ dev->tx_queue_len = MAX_DEFAULT_IFQLEN; rtnl_lock (); /* needed due to Ioctl calling sequence */ if (ret) { if (cxt1e1_log_level >= LOG_WARN) pr_info("%s: create_chan[%d] registration error = %d.\n", ci->devname, cp->channum, ret); free_netdev (dev); /* cleanup */ return 0; /* failed to register */ } return dev; } /* the idea here is to get port information and pass it back (using pointer) */ STATIC status_t do_get_port (struct net_device * ndev, void *data) { int ret; ci_t *ci; /* ci stands for card information */ struct sbecom_port_param pp;/* copy data to kernel land */ if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param))) return -EFAULT; if (pp.portnum >= MUSYCC_NPORTS) return -EFAULT; ci = get_ci_by_dev (ndev); if (!ci) return -EINVAL; /* get card info */ ret = mkret (c4_get_port (ci, pp.portnum)); if (ret) return ret; if (copy_to_user (data, &ci->port[pp.portnum].p, sizeof (struct sbecom_port_param))) return -EFAULT; return 0; } /* this function copys the user data and then calls the real action function */ STATIC status_t do_set_port (struct net_device * ndev, void *data) { ci_t *ci; /* ci stands for card information */ struct sbecom_port_param pp;/* copy data to kernel land */ if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param))) return -EFAULT; if (pp.portnum >= 
MUSYCC_NPORTS) return -EFAULT; ci = get_ci_by_dev (ndev); if (!ci) return -EINVAL; /* get card info */ if (pp.portnum >= ci->max_port) /* sanity check */ return -ENXIO; memcpy (&ci->port[pp.portnum].p, &pp, sizeof (struct sbecom_port_param)); return mkret (c4_set_port (ci, pp.portnum)); } /* work the port loopback mode as per directed */ STATIC status_t do_port_loop (struct net_device * ndev, void *data) { struct sbecom_port_param pp; ci_t *ci; if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param))) return -EFAULT; ci = get_ci_by_dev (ndev); if (!ci) return -EINVAL; return mkret (c4_loop_port (ci, pp.portnum, pp.port_mode)); } /* set the specified register with the given value / or just read it */ STATIC status_t do_framer_rw (struct net_device * ndev, void *data) { struct sbecom_port_param pp; ci_t *ci; int ret; if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param))) return -EFAULT; ci = get_ci_by_dev (ndev); if (!ci) return -EINVAL; ret = mkret (c4_frame_rw (ci, &pp)); if (ret) return ret; if (copy_to_user (data, &pp, sizeof (struct sbecom_port_param))) return -EFAULT; return 0; } /* set the specified register with the given value / or just read it */ STATIC status_t do_pld_rw (struct net_device * ndev, void *data) { struct sbecom_port_param pp; ci_t *ci; int ret; if (copy_from_user (&pp, data, sizeof (struct sbecom_port_param))) return -EFAULT; ci = get_ci_by_dev (ndev); if (!ci) return -EINVAL; ret = mkret (c4_pld_rw (ci, &pp)); if (ret) return ret; if (copy_to_user (data, &pp, sizeof (struct sbecom_port_param))) return -EFAULT; return 0; } /* set the specified register with the given value / or just read it */ STATIC status_t do_musycc_rw (struct net_device * ndev, void *data) { struct c4_musycc_param mp; ci_t *ci; int ret; if (copy_from_user (&mp, data, sizeof (struct c4_musycc_param))) return -EFAULT; ci = get_ci_by_dev (ndev); if (!ci) return -EINVAL; ret = mkret (c4_musycc_rw (ci, &mp)); if (ret) return ret; if (copy_to_user (data, 
&mp, sizeof (struct c4_musycc_param))) return -EFAULT; return 0; } STATIC status_t do_get_chan (struct net_device * ndev, void *data) { struct sbecom_chan_param cp; int ret; if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param))) return -EFAULT; if ((ret = mkret (c4_get_chan (cp.channum, &cp)))) return ret; if (copy_to_user (data, &cp, sizeof (struct sbecom_chan_param))) return -EFAULT; return 0; } STATIC status_t do_set_chan (struct net_device * ndev, void *data) { struct sbecom_chan_param cp; int ret; ci_t *ci; if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param))) return -EFAULT; ci = get_ci_by_dev (ndev); if (!ci) return -EINVAL; switch (ret = mkret (c4_set_chan (cp.channum, &cp))) { case 0: return 0; default: return ret; } } STATIC status_t do_create_chan (struct net_device * ndev, void *data) { ci_t *ci; struct net_device *dev; struct sbecom_chan_param cp; int ret; if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param))) return -EFAULT; ci = get_ci_by_dev (ndev); if (!ci) return -EINVAL; dev = create_chan (ndev, ci, &cp); if (!dev) return -EBUSY; ret = mkret (c4_new_chan (ci, cp.port, cp.channum, dev)); if (ret) { rtnl_unlock (); /* needed due to Ioctl calling sequence */ unregister_hdlc_device (dev); rtnl_lock (); /* needed due to Ioctl calling sequence */ free_netdev (dev); } return ret; } STATIC status_t do_get_chan_stats (struct net_device * ndev, void *data) { struct c4_chan_stats_wrap ccs; int ret; if (copy_from_user (&ccs, data, sizeof (struct c4_chan_stats_wrap))) return -EFAULT; switch (ret = mkret (c4_get_chan_stats (ccs.channum, &ccs.stats))) { case 0: break; default: return ret; } if (copy_to_user (data, &ccs, sizeof (struct c4_chan_stats_wrap))) return -EFAULT; return 0; } STATIC status_t do_set_loglevel (struct net_device * ndev, void *data) { unsigned int cxt1e1_log_level; if (copy_from_user (&cxt1e1_log_level, data, sizeof (int))) return -EFAULT; sbecom_set_loglevel (cxt1e1_log_level); return 0; } STATIC 
status_t do_deluser (struct net_device * ndev, int lockit) { if (ndev->flags & IFF_UP) return -EBUSY; { ci_t *ci; mch_t *ch; const struct c4_priv *priv; int channum; priv = (struct c4_priv *) dev_to_hdlc (ndev)->priv; ci = priv->ci; channum = priv->channum; ch = c4_find_chan (channum); if (ch == NULL) return -ENOENT; ch->user = 0; /* will be freed, below */ } if (lockit) rtnl_unlock (); /* needed if Ioctl calling sequence */ unregister_hdlc_device (ndev); if (lockit) rtnl_lock (); /* needed if Ioctl calling sequence */ free_netdev (ndev); return 0; } int do_del_chan (struct net_device * musycc_dev, void *data) { struct sbecom_chan_param cp; char buf[sizeof (CHANNAME) + 3]; struct net_device *dev; int ret; if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param))) return -EFAULT; if (cp.channum > 999) return -EINVAL; snprintf (buf, sizeof(buf), CHANNAME "%d", cp.channum); if (!(dev = dev_get_by_name (&init_net, buf))) return -ENOENT; dev_put (dev); ret = do_deluser (dev, 1); if (ret) return ret; return c4_del_chan (cp.channum); } int c4_reset_board (void *); int do_reset (struct net_device * musycc_dev, void *data) { const struct c4_priv *priv; int i; for (i = 0; i < 128; i++) { struct net_device *ndev; char buf[sizeof (CHANNAME) + 3]; sprintf (buf, CHANNAME "%d", i); if (!(ndev = dev_get_by_name(&init_net, buf))) continue; priv = dev_to_hdlc (ndev)->priv; if ((unsigned long) (priv->ci) == (unsigned long) (netdev_priv(musycc_dev))) { ndev->flags &= ~IFF_UP; dev_put (ndev); netif_stop_queue (ndev); do_deluser (ndev, 1); } else dev_put (ndev); } return 0; } int do_reset_chan_stats (struct net_device * musycc_dev, void *data) { struct sbecom_chan_param cp; if (copy_from_user (&cp, data, sizeof (struct sbecom_chan_param))) return -EFAULT; return mkret (c4_del_chan_stats (cp.channum)); } STATIC status_t c4_ioctl (struct net_device * ndev, struct ifreq * ifr, int cmd) { ci_t *ci; void *data; int iocmd, iolen; status_t ret; static struct data { union { u_int8_t c; 
u_int32_t i; struct sbe_brd_info bip; struct sbe_drv_info dip; struct sbe_iid_info iip; struct sbe_brd_addr bap; struct sbecom_chan_stats stats; struct sbecom_chan_param param; struct temux_card_stats cards; struct sbecom_card_param cardp; struct sbecom_framer_param frp; } u; } arg; if (!capable (CAP_SYS_ADMIN)) return -EPERM; if (cmd != SIOCDEVPRIVATE + 15) return -EINVAL; if (!(ci = get_ci_by_dev (ndev))) return -EINVAL; if (ci->state != C_RUNNING) return -ENODEV; if (copy_from_user (&iocmd, ifr->ifr_data, sizeof (iocmd))) return -EFAULT; #if 0 if (copy_from_user (&len, ifr->ifr_data + sizeof (iocmd), sizeof (len))) return -EFAULT; #endif #if 0 pr_info("c4_ioctl: iocmd %x, dir %x type %x nr %x iolen %d.\n", iocmd, _IOC_DIR (iocmd), _IOC_TYPE (iocmd), _IOC_NR (iocmd), _IOC_SIZE (iocmd)); #endif iolen = _IOC_SIZE (iocmd); data = ifr->ifr_data + sizeof (iocmd); if (copy_from_user (&arg, data, iolen)) return -EFAULT; ret = 0; switch (iocmd) { case SBE_IOC_PORT_GET: //pr_info(">> SBE_IOC_PORT_GET Ioctl...\n"); ret = do_get_port (ndev, data); break; case SBE_IOC_PORT_SET: //pr_info(">> SBE_IOC_PORT_SET Ioctl...\n"); ret = do_set_port (ndev, data); break; case SBE_IOC_CHAN_GET: //pr_info(">> SBE_IOC_CHAN_GET Ioctl...\n"); ret = do_get_chan (ndev, data); break; case SBE_IOC_CHAN_SET: //pr_info(">> SBE_IOC_CHAN_SET Ioctl...\n"); ret = do_set_chan (ndev, data); break; case C4_DEL_CHAN: //pr_info(">> C4_DEL_CHAN Ioctl...\n"); ret = do_del_chan (ndev, data); break; case SBE_IOC_CHAN_NEW: ret = do_create_chan (ndev, data); break; case SBE_IOC_CHAN_GET_STAT: ret = do_get_chan_stats (ndev, data); break; case SBE_IOC_LOGLEVEL: ret = do_set_loglevel (ndev, data); break; case SBE_IOC_RESET_DEV: ret = do_reset (ndev, data); break; case SBE_IOC_CHAN_DEL_STAT: ret = do_reset_chan_stats (ndev, data); break; case C4_LOOP_PORT: ret = do_port_loop (ndev, data); break; case C4_RW_FRMR: ret = do_framer_rw (ndev, data); break; case C4_RW_MSYC: ret = do_musycc_rw (ndev, data); break; case 
C4_RW_PLD: ret = do_pld_rw (ndev, data); break; case SBE_IOC_IID_GET: ret = (iolen == sizeof (struct sbe_iid_info)) ? c4_get_iidinfo (ci, &arg.u.iip) : -EFAULT; if (ret == 0) /* no error, copy data */ if (copy_to_user (data, &arg, iolen)) return -EFAULT; break; default: //pr_info(">> c4_ioctl: EINVAL - unknown iocmd <%x>\n", iocmd); ret = -EINVAL; break; } return mkret (ret); } static const struct net_device_ops c4_ops = { .ndo_open = void_open, .ndo_start_xmit = c4_linux_xmit, .ndo_do_ioctl = c4_ioctl, }; static void c4_setup(struct net_device *dev) { dev->type = ARPHRD_VOID; dev->netdev_ops = &c4_ops; } struct net_device *__init c4_add_dev (hdw_info_t * hi, int brdno, unsigned long f0, unsigned long f1, int irq0, int irq1) { struct net_device *ndev; ci_t *ci; ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup); if (!ndev) { pr_warning("%s: no memory for struct net_device !\n", hi->devname); error_flag = ENOMEM; return 0; } ci = (ci_t *)(netdev_priv(ndev)); ndev->irq = irq0; ci->hdw_info = hi; ci->state = C_INIT; /* mark as hardware not available */ ci->next = c4_list; c4_list = ci; ci->brdno = ci->next ? 
ci->next->brdno + 1 : 0; if (CI == 0) CI = ci; /* DEBUG, only board 0 usage */ strcpy (ci->devname, hi->devname); ci->release = &pmcc4_OSSI_release[0]; /* tasklet */ #if defined(SBE_ISR_TASKLET) tasklet_init (&ci->ci_musycc_isr_tasklet, (void (*) (unsigned long)) musycc_intr_bh_tasklet, (unsigned long) ci); if (atomic_read (&ci->ci_musycc_isr_tasklet.count) == 0) tasklet_disable_nosync (&ci->ci_musycc_isr_tasklet); #elif defined(SBE_ISR_IMMEDIATE) ci->ci_musycc_isr_tq.routine = (void *) (unsigned long) musycc_intr_bh_tasklet; ci->ci_musycc_isr_tq.data = ci; #endif if (register_netdev (ndev) || (c4_init (ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS)) { OS_kfree (netdev_priv(ndev)); OS_kfree (ndev); error_flag = ENODEV; return 0; } /************************************************************* * int request_irq(unsigned int irq, * void (*handler)(int, void *, struct pt_regs *), * unsigned long flags, const char *dev_name, void *dev_id); * wherein: * irq -> The interrupt number that is being requested. * handler -> Pointer to handling function being installed. * flags -> A bit mask of options related to interrupt management. * dev_name -> String used in /proc/interrupts to show owner of interrupt. * dev_id -> Pointer (for shared interrupt lines) to point to its own * private data area (to identify which device is interrupting). 
* * extern void free_irq(unsigned int irq, void *dev_id); **************************************************************/ if (request_irq (irq0, &c4_linux_interrupt, IRQF_SHARED, ndev->name, ndev)) { pr_warning("%s: MUSYCC could not get irq: %d\n", ndev->name, irq0); unregister_netdev (ndev); OS_kfree (netdev_priv(ndev)); OS_kfree (ndev); error_flag = EIO; return 0; } #ifdef CONFIG_SBE_PMCC4_NCOMM if (request_irq (irq1, &c4_ebus_interrupt, IRQF_SHARED, ndev->name, ndev)) { pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1); unregister_netdev (ndev); free_irq (irq0, ndev); OS_kfree (netdev_priv(ndev)); OS_kfree (ndev); error_flag = EIO; return 0; } #endif /* setup board identification information */ { u_int32_t tmp; hdw_sn_get (hi, brdno); /* also sets PROM format type (promfmt) * for later usage */ switch (hi->promfmt) { case PROM_FORMAT_TYPE1: memcpy (ndev->dev_addr, (FLD_TYPE1 *) (hi->mfg_info.pft1.Serial), 6); memcpy (&tmp, (FLD_TYPE1 *) (hi->mfg_info.pft1.Id), 4); /* unaligned data * acquisition */ ci->brd_id = cpu_to_be32 (tmp); break; case PROM_FORMAT_TYPE2: memcpy (ndev->dev_addr, (FLD_TYPE2 *) (hi->mfg_info.pft2.Serial), 6); memcpy (&tmp, (FLD_TYPE2 *) (hi->mfg_info.pft2.Id), 4); /* unaligned data * acquisition */ ci->brd_id = cpu_to_be32 (tmp); break; default: ci->brd_id = 0; memset (ndev->dev_addr, 0, 6); break; } #if 1 sbeid_set_hdwbid (ci); /* requires bid to be preset */ #else sbeid_set_bdtype (ci); /* requires hdw_bid to be preset */ #endif } #ifdef CONFIG_PROC_FS sbecom_proc_brd_init (ci); #endif #if defined(SBE_ISR_TASKLET) tasklet_enable (&ci->ci_musycc_isr_tasklet); #endif if ((error_flag = c4_init2 (ci)) != SBE_DRVR_SUCCESS) { #ifdef CONFIG_PROC_FS sbecom_proc_brd_cleanup (ci); #endif unregister_netdev (ndev); free_irq (irq1, ndev); free_irq (irq0, ndev); OS_kfree (netdev_priv(ndev)); OS_kfree (ndev); return 0; /* failure, error_flag is set */ } return ndev; } STATIC int __init c4_mod_init (void) { int rtn; pr_warning("%s\n", 
pmcc4_OSSI_release); if ((rtn = c4hw_attach_all ())) return -rtn; /* installation failure - see system log */ /* housekeeping notifications */ if (cxt1e1_log_level != log_level_default) pr_info("NOTE: driver parameter <cxt1e1_log_level> changed from default %d to %d.\n", log_level_default, cxt1e1_log_level); if (cxt1e1_max_mru != max_mru_default) pr_info("NOTE: driver parameter <cxt1e1_max_mru> changed from default %d to %d.\n", max_mru_default, cxt1e1_max_mru); if (cxt1e1_max_mtu != max_mtu_default) pr_info("NOTE: driver parameter <cxt1e1_max_mtu> changed from default %d to %d.\n", max_mtu_default, cxt1e1_max_mtu); if (max_rxdesc_used != max_rxdesc_default) { if (max_rxdesc_used > 2000) max_rxdesc_used = 2000; /* out-of-bounds reset */ pr_info("NOTE: driver parameter <max_rxdesc_used> changed from default %d to %d.\n", max_rxdesc_default, max_rxdesc_used); } if (max_txdesc_used != max_txdesc_default) { if (max_txdesc_used > 1000) max_txdesc_used = 1000; /* out-of-bounds reset */ pr_info("NOTE: driver parameter <max_txdesc_used> changed from default %d to %d.\n", max_txdesc_default, max_txdesc_used); } return 0; /* installation success */ } /* * find any still allocated hdlc registrations and unregister via call to * do_deluser() */ STATIC void __exit cleanup_hdlc (void) { hdw_info_t *hi; ci_t *ci; struct net_device *ndev; int i, j, k; for (i = 0, hi = hdw_info; i < MAX_BOARDS; i++, hi++) { if (hi->ndev) /* a board has been attached */ { ci = (ci_t *)(netdev_priv(hi->ndev)); for (j = 0; j < ci->max_port; j++) for (k = 0; k < MUSYCC_NCHANS; k++) if ((ndev = ci->port[j].chan[k]->user)) { do_deluser (ndev, 0); } } } } STATIC void __exit c4_mod_remove (void) { cleanup_hdlc(); /* delete any missed channels */ cleanup_devs(); c4_cleanup(); cleanup_ioremap(); pr_info("SBE - driver removed.\n"); } module_init (c4_mod_init); module_exit (c4_mod_remove); MODULE_AUTHOR ("SBE Technical Services <support@sbei.com>"); MODULE_DESCRIPTION ("wanPCI-CxT1E1 Generic HDLC WAN Driver 
module"); #ifdef MODULE_LICENSE MODULE_LICENSE ("GPL"); #endif /*** End-of-File ***/
gpl-2.0
munoz0raul/linux-toradex_ACM_iMX6
sound/soc/codecs/jz4740.c
2146
11351
/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

/* ASoC driver for the codec embedded in the Ingenic JZ4740 SoC. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/regmap.h>
#include <linux/delay.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/tlv.h>

/* The codec exposes exactly two 32-bit MMIO registers. */
#define JZ4740_REG_CODEC_1 0x0
#define JZ4740_REG_CODEC_2 0x4

/* CODEC_1 bit fields (power/route control). */
#define JZ4740_CODEC_1_LINE_ENABLE BIT(29)
#define JZ4740_CODEC_1_MIC_ENABLE BIT(28)
#define JZ4740_CODEC_1_SW1_ENABLE BIT(27)
#define JZ4740_CODEC_1_ADC_ENABLE BIT(26)
#define JZ4740_CODEC_1_SW2_ENABLE BIT(25)
#define JZ4740_CODEC_1_DAC_ENABLE BIT(24)
#define JZ4740_CODEC_1_VREF_DISABLE BIT(20)
#define JZ4740_CODEC_1_VREF_AMP_DISABLE BIT(19)
#define JZ4740_CODEC_1_VREF_PULLDOWN BIT(18)
#define JZ4740_CODEC_1_VREF_LOW_CURRENT BIT(17)
#define JZ4740_CODEC_1_VREF_HIGH_CURRENT BIT(16)
#define JZ4740_CODEC_1_HEADPHONE_DISABLE BIT(14)
#define JZ4740_CODEC_1_HEADPHONE_AMP_CHANGE_ANY BIT(13)
#define JZ4740_CODEC_1_HEADPHONE_CHARGE BIT(12)
#define JZ4740_CODEC_1_HEADPHONE_PULLDOWN (BIT(11) | BIT(10))
#define JZ4740_CODEC_1_HEADPHONE_POWERDOWN_M BIT(9)
#define JZ4740_CODEC_1_HEADPHONE_POWERDOWN BIT(8)
#define JZ4740_CODEC_1_SUSPEND BIT(1)
#define JZ4740_CODEC_1_RESET BIT(0)

/* Bit positions of the fields above, for SOC_SINGLE/DAPM helpers. */
#define JZ4740_CODEC_1_LINE_ENABLE_OFFSET 29
#define JZ4740_CODEC_1_MIC_ENABLE_OFFSET 28
#define JZ4740_CODEC_1_SW1_ENABLE_OFFSET 27
#define JZ4740_CODEC_1_ADC_ENABLE_OFFSET 26
#define JZ4740_CODEC_1_SW2_ENABLE_OFFSET 25
#define JZ4740_CODEC_1_DAC_ENABLE_OFFSET 24
#define JZ4740_CODEC_1_HEADPHONE_DISABLE_OFFSET 14
#define JZ4740_CODEC_1_HEADPHONE_POWERDOWN_OFFSET 8

/* CODEC_2 bit fields (volumes and sample-rate divider). */
#define JZ4740_CODEC_2_INPUT_VOLUME_MASK	0x1f0000
#define JZ4740_CODEC_2_SAMPLE_RATE_MASK		0x000f00
#define JZ4740_CODEC_2_MIC_BOOST_GAIN_MASK	0x000030
#define JZ4740_CODEC_2_HEADPHONE_VOLUME_MASK	0x000003

#define JZ4740_CODEC_2_INPUT_VOLUME_OFFSET	16
#define JZ4740_CODEC_2_SAMPLE_RATE_OFFSET	8
#define JZ4740_CODEC_2_MIC_BOOST_GAIN_OFFSET	4
#define JZ4740_CODEC_2_HEADPHONE_VOLUME_OFFSET	0

/* Hardware reset values, used to seed the regmap cache. */
static const struct reg_default jz4740_codec_reg_defaults[] = {
	{ JZ4740_REG_CODEC_1, 0x021b2302 },
	{ JZ4740_REG_CODEC_2, 0x00170803 },
};

struct jz4740_codec {
	struct regmap *regmap;	/* MMIO regmap over the two codec registers */
};

/* Mic boost gain: steps 0-2 are 0/+6/+12 dB, step 3 jumps to +20 dB. */
static const unsigned int jz4740_mic_tlv[] = {
	TLV_DB_RANGE_HEAD(2),
	0, 2, TLV_DB_SCALE_ITEM(0, 600, 0),
	3, 3, TLV_DB_SCALE_ITEM(2000, 0, 0),
};

static const DECLARE_TLV_DB_SCALE(jz4740_out_tlv, 0, 200, 0);
static const DECLARE_TLV_DB_SCALE(jz4740_in_tlv, -3450, 150, 0);

/* Mixer controls exposed to userspace. */
static const struct snd_kcontrol_new jz4740_codec_controls[] = {
	SOC_SINGLE_TLV("Master Playback Volume", JZ4740_REG_CODEC_2,
			JZ4740_CODEC_2_HEADPHONE_VOLUME_OFFSET, 3, 0,
			jz4740_out_tlv),
	SOC_SINGLE_TLV("Master Capture Volume", JZ4740_REG_CODEC_2,
			JZ4740_CODEC_2_INPUT_VOLUME_OFFSET, 31, 0,
			jz4740_in_tlv),
	/* Inverted: the hardware bit is a "disable" flag. */
	SOC_SINGLE("Master Playback Switch", JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_HEADPHONE_DISABLE_OFFSET, 1, 1),
	SOC_SINGLE_TLV("Mic Capture Volume", JZ4740_REG_CODEC_2,
			JZ4740_CODEC_2_MIC_BOOST_GAIN_OFFSET, 3, 0,
			jz4740_mic_tlv),
};

/* Output mixer inputs: line bypass (SW1) and DAC (SW2). */
static const struct snd_kcontrol_new jz4740_codec_output_controls[] = {
	SOC_DAPM_SINGLE("Bypass Switch", JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_SW1_ENABLE_OFFSET, 1, 0),
	SOC_DAPM_SINGLE("DAC Switch", JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_SW2_ENABLE_OFFSET, 1, 0),
};

/* Input mixer sources: line-in and mic. */
static const struct snd_kcontrol_new jz4740_codec_input_controls[] = {
	SOC_DAPM_SINGLE("Line Capture Switch", JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_LINE_ENABLE_OFFSET, 1, 0),
	SOC_DAPM_SINGLE("Mic Capture Switch", JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_MIC_ENABLE_OFFSET, 1, 0),
};

static const struct snd_soc_dapm_widget jz4740_codec_dapm_widgets[] = {
	SND_SOC_DAPM_ADC("ADC", "Capture", JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_ADC_ENABLE_OFFSET, 0),
	SND_SOC_DAPM_DAC("DAC", "Playback", JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_DAC_ENABLE_OFFSET, 0),
	/* Output mixer power bit is active-low (headphone powerdown). */
	SND_SOC_DAPM_MIXER("Output Mixer", JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_HEADPHONE_POWERDOWN_OFFSET, 1,
			jz4740_codec_output_controls,
			ARRAY_SIZE(jz4740_codec_output_controls)),
	SND_SOC_DAPM_MIXER_NAMED_CTL("Input Mixer", SND_SOC_NOPM, 0, 0,
			jz4740_codec_input_controls,
			ARRAY_SIZE(jz4740_codec_input_controls)),
	SND_SOC_DAPM_MIXER("Line Input", SND_SOC_NOPM, 0, 0, NULL, 0),
	SND_SOC_DAPM_OUTPUT("LOUT"),
	SND_SOC_DAPM_OUTPUT("ROUT"),
	SND_SOC_DAPM_INPUT("MIC"),
	SND_SOC_DAPM_INPUT("LIN"),
	SND_SOC_DAPM_INPUT("RIN"),
};

static const struct snd_soc_dapm_route jz4740_codec_dapm_routes[] = {
	{"Line Input", NULL, "LIN"},
	{"Line Input", NULL, "RIN"},

	{"Input Mixer", "Line Capture Switch", "Line Input"},
	{"Input Mixer", "Mic Capture Switch", "MIC"},

	{"ADC", NULL, "Input Mixer"},

	{"Output Mixer", "Bypass Switch", "Input Mixer"},
	{"Output Mixer", "DAC Switch", "DAC"},

	{"LOUT", NULL, "Output Mixer"},
	{"ROUT", NULL, "Output Mixer"},
};

/*
 * Program the sample-rate divider in CODEC_2 for the requested rate.
 * Only the nine discrete rates below are supported by the hardware.
 */
static int jz4740_codec_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct jz4740_codec *jz4740_codec = snd_soc_codec_get_drvdata(dai->codec);
	uint32_t val;

	switch (params_rate(params)) {
	case 8000:
		val = 0;
		break;
	case 11025:
		val = 1;
		break;
	case 12000:
		val = 2;
		break;
	case 16000:
		val = 3;
		break;
	case 22050:
		val = 4;
		break;
	case 24000:
		val = 5;
		break;
	case 32000:
		val = 6;
		break;
	case 44100:
		val = 7;
		break;
	case 48000:
		val = 8;
		break;
	default:
		return -EINVAL;
	}

	val <<= JZ4740_CODEC_2_SAMPLE_RATE_OFFSET;

	regmap_update_bits(jz4740_codec->regmap, JZ4740_REG_CODEC_2,
				JZ4740_CODEC_2_SAMPLE_RATE_MASK, val);

	return 0;
}

static const struct snd_soc_dai_ops jz4740_codec_dai_ops = {
	.hw_params = jz4740_codec_hw_params,
};

static struct snd_soc_dai_driver jz4740_codec_dai = {
	.name = "jz4740-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8,
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_48000,
		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8,
	},
	.ops = &jz4740_codec_dai_ops,
	.symmetric_rates = 1,	/* playback and capture rates must match */
};

/*
 * Bring the codec out of suspend: pulse the reset bit, then clear both
 * RESET and SUSPEND and replay the cached register values.
 */
static void jz4740_codec_wakeup(struct regmap *regmap)
{
	regmap_update_bits(regmap, JZ4740_REG_CODEC_1,
		JZ4740_CODEC_1_RESET, JZ4740_CODEC_1_RESET);
	udelay(2);	/* brief hold time for the reset pulse */

	regmap_update_bits(regmap, JZ4740_REG_CODEC_1,
		JZ4740_CODEC_1_SUSPEND | JZ4740_CODEC_1_RESET, 0);

	regcache_sync(regmap);
}

/*
 * ASoC bias-level callback: manages vref/headphone power and the
 * suspend flag as the card moves between power states.
 */
static int jz4740_codec_set_bias_level(struct snd_soc_codec *codec,
	enum snd_soc_bias_level level)
{
	struct jz4740_codec *jz4740_codec = snd_soc_codec_get_drvdata(codec);
	struct regmap *regmap = jz4740_codec->regmap;
	unsigned int mask;
	unsigned int value;

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;
	case SND_SOC_BIAS_PREPARE:
		/* Enable vref and the headphone amp (clear disable bits). */
		mask = JZ4740_CODEC_1_VREF_DISABLE |
				JZ4740_CODEC_1_VREF_AMP_DISABLE |
				JZ4740_CODEC_1_HEADPHONE_POWERDOWN_M;
		value = 0;

		regmap_update_bits(regmap, JZ4740_REG_CODEC_1, mask, value);
		break;
	case SND_SOC_BIAS_STANDBY:
		/* The only way to clear the suspend flag is to reset the codec */
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
			jz4740_codec_wakeup(regmap);

		mask = JZ4740_CODEC_1_VREF_DISABLE |
			JZ4740_CODEC_1_VREF_AMP_DISABLE |
			JZ4740_CODEC_1_HEADPHONE_POWERDOWN_M;
		value = JZ4740_CODEC_1_VREF_DISABLE |
			JZ4740_CODEC_1_VREF_AMP_DISABLE |
			JZ4740_CODEC_1_HEADPHONE_POWERDOWN_M;

		regmap_update_bits(regmap, JZ4740_REG_CODEC_1, mask, value);
		break;
	case SND_SOC_BIAS_OFF:
		mask = JZ4740_CODEC_1_SUSPEND;
		value = JZ4740_CODEC_1_SUSPEND;

		regmap_update_bits(regmap, JZ4740_REG_CODEC_1, mask, value);
		/* Cache must be replayed after the wakeup reset. */
		regcache_mark_dirty(regmap);
		break;
	default:
		break;
	}

	codec->dapm.bias_level = level;

	return 0;
}

/* Codec-level probe: route DAC to output by default, go to standby. */
static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
{
	struct jz4740_codec *jz4740_codec = snd_soc_codec_get_drvdata(codec);

	regmap_update_bits(jz4740_codec->regmap, JZ4740_REG_CODEC_1,
			JZ4740_CODEC_1_SW2_ENABLE, JZ4740_CODEC_1_SW2_ENABLE);

	jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	return 0;
}

static int jz4740_codec_dev_remove(struct snd_soc_codec *codec)
{
	jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_OFF);

	return 0;
}

#ifdef CONFIG_PM_SLEEP

static int jz4740_codec_suspend(struct snd_soc_codec *codec)
{
	return jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_OFF);
}

static int jz4740_codec_resume(struct snd_soc_codec *codec)
{
	return jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
}

#else
#define jz4740_codec_suspend NULL
#define jz4740_codec_resume NULL
#endif

static struct snd_soc_codec_driver soc_codec_dev_jz4740_codec = {
	.probe = jz4740_codec_dev_probe,
	.remove = jz4740_codec_dev_remove,
	.suspend = jz4740_codec_suspend,
	.resume = jz4740_codec_resume,
	.set_bias_level = jz4740_codec_set_bias_level,
	.controls = jz4740_codec_controls,
	.num_controls = ARRAY_SIZE(jz4740_codec_controls),
	.dapm_widgets = jz4740_codec_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(jz4740_codec_dapm_widgets),
	.dapm_routes = jz4740_codec_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(jz4740_codec_dapm_routes),
};

static const struct regmap_config jz4740_codec_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = JZ4740_REG_CODEC_2,

	.reg_defaults = jz4740_codec_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(jz4740_codec_reg_defaults),
	.cache_type = REGCACHE_RBTREE,
};

/* Platform probe: map the register window and register the codec. */
static int jz4740_codec_probe(struct platform_device *pdev)
{
	int ret;
	struct jz4740_codec *jz4740_codec;
	struct resource *mem;
	void __iomem *base;

	jz4740_codec = devm_kzalloc(&pdev->dev, sizeof(*jz4740_codec),
				    GFP_KERNEL);
	if (!jz4740_codec)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	jz4740_codec->regmap = devm_regmap_init_mmio(&pdev->dev, base,
					    &jz4740_codec_regmap_config);
	if (IS_ERR(jz4740_codec->regmap))
		return PTR_ERR(jz4740_codec->regmap);

	platform_set_drvdata(pdev, jz4740_codec);

	ret = snd_soc_register_codec(&pdev->dev,
			&soc_codec_dev_jz4740_codec, &jz4740_codec_dai, 1);
	if (ret)
		dev_err(&pdev->dev, "Failed to register codec\n");

	return ret;
}

static int jz4740_codec_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver jz4740_codec_driver = {
	.probe = jz4740_codec_probe,
	.remove = jz4740_codec_remove,
	.driver = {
		.name = "jz4740-codec",
		.owner = THIS_MODULE,
	},
};

module_platform_driver(jz4740_codec_driver);

MODULE_DESCRIPTION("JZ4740 SoC internal codec driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:jz4740-codec");
gpl-2.0
shakalaca/ASUS_ZenFone_ZE551KL
kernel/drivers/mfd/mc13xxx-i2c.c
2402
3107
/*
 * Copyright 2009-2010 Creative Product Design
 * Marc Reilly marc@cpdesign.com.au
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 */

/* I2C bus glue for the Freescale MC13XXX PMIC MFD core. */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mc13xxx.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/i2c.h>
#include <linux/err.h>

#include "mc13xxx.h"

/* Legacy (non-DT) match table; driver_data selects the chip variant. */
static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
	{
		.name = "mc13892",
		.driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
	}, {
		.name = "mc34708",
		.driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);

/* Devicetree match table; .data selects the chip variant. */
static const struct of_device_id mc13xxx_dt_ids[] = {
	{
		.compatible = "fsl,mc13892",
		.data = &mc13xxx_variant_mc13892,
	}, {
		.compatible = "fsl,mc34708",
		.data = &mc13xxx_variant_mc34708,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);

/* PMIC registers are 8-bit addressed with 24-bit values; no caching. */
static struct regmap_config mc13xxx_regmap_i2c_config = {
	.reg_bits = 8,
	.val_bits = 24,

	.max_register = MC13XXX_NUMREGS,

	.cache_type = REGCACHE_NONE,
};

/*
 * Probe: allocate the shared mc13xxx state, set up the I2C regmap,
 * pick the chip variant (from DT when present, else from the i2c id)
 * and hand off to the common MFD init.
 */
static int mc13xxx_i2c_probe(struct i2c_client *client,
		const struct i2c_device_id *id)
{
	struct mc13xxx *mc13xxx;
	struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
	int ret;

	mc13xxx = devm_kzalloc(&client->dev, sizeof(*mc13xxx), GFP_KERNEL);
	if (!mc13xxx)
		return -ENOMEM;

	dev_set_drvdata(&client->dev, mc13xxx);

	mc13xxx->dev = &client->dev;
	mutex_init(&mc13xxx->lock);

	mc13xxx->regmap = devm_regmap_init_i2c(client,
					       &mc13xxx_regmap_i2c_config);
	if (IS_ERR(mc13xxx->regmap)) {
		ret = PTR_ERR(mc13xxx->regmap);
		dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
				ret);
		dev_set_drvdata(&client->dev, NULL);
		return ret;
	}

	if (client->dev.of_node) {
		/*
		 * NOTE(review): of_match_device() can return NULL in theory;
		 * probe is only reached via a table match here, but a NULL
		 * check would be more defensive — confirm before hardening.
		 */
		const struct of_device_id *of_id =
			of_match_device(mc13xxx_dt_ids, &client->dev);
		mc13xxx->variant = of_id->data;
	} else {
		mc13xxx->variant = (void *)id->driver_data;
	}

	ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);

	return ret;
}

static int mc13xxx_i2c_remove(struct i2c_client *client)
{
	struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);

	mc13xxx_common_cleanup(mc13xxx);

	return 0;
}

static struct i2c_driver mc13xxx_i2c_driver = {
	.id_table = mc13xxx_i2c_device_id,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mc13xxx",
		.of_match_table = mc13xxx_dt_ids,
	},
	.probe = mc13xxx_i2c_probe,
	.remove = mc13xxx_i2c_remove,
};

static int __init mc13xxx_i2c_init(void)
{
	return i2c_add_driver(&mc13xxx_i2c_driver);
}
/* Early init: regulator/RTC subdevices may be needed before device_initcall. */
subsys_initcall(mc13xxx_i2c_init);

static void __exit mc13xxx_i2c_exit(void)
{
	i2c_del_driver(&mc13xxx_i2c_driver);
}
module_exit(mc13xxx_i2c_exit);

MODULE_DESCRIPTION("i2c driver for Freescale MC13XXX PMIC");
/* Fixed: original string was missing the closing '>' of the address. */
MODULE_AUTHOR("Marc Reilly <marc@cpdesign.com.au>");
MODULE_LICENSE("GPL v2");
gpl-2.0
ausdim/SGS3-JB-U8
drivers/mtd/onenand/samsung.c
2658
29468
/* * Samsung S3C64XX/S5PC1XX OneNAND driver * * Copyright © 2008-2010 Samsung Electronics * Kyungmin Park <kyungmin.park@samsung.com> * Marek Szyprowski <m.szyprowski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Implementation: * S3C64XX and S5PC100: emulate the pseudo BufferRAM * S5PC110: use DMA */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/mtd/onenand.h> #include <linux/mtd/partitions.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <asm/mach/flash.h> #include <plat/regs-onenand.h> #include <linux/io.h> enum soc_type { TYPE_S3C6400, TYPE_S3C6410, TYPE_S5PC100, TYPE_S5PC110, }; #define ONENAND_ERASE_STATUS 0x00 #define ONENAND_MULTI_ERASE_SET 0x01 #define ONENAND_ERASE_START 0x03 #define ONENAND_UNLOCK_START 0x08 #define ONENAND_UNLOCK_END 0x09 #define ONENAND_LOCK_START 0x0A #define ONENAND_LOCK_END 0x0B #define ONENAND_LOCK_TIGHT_START 0x0C #define ONENAND_LOCK_TIGHT_END 0x0D #define ONENAND_UNLOCK_ALL 0x0E #define ONENAND_OTP_ACCESS 0x12 #define ONENAND_SPARE_ACCESS_ONLY 0x13 #define ONENAND_MAIN_ACCESS_ONLY 0x14 #define ONENAND_ERASE_VERIFY 0x15 #define ONENAND_MAIN_SPARE_ACCESS 0x16 #define ONENAND_PIPELINE_READ 0x4000 #define MAP_00 (0x0) #define MAP_01 (0x1) #define MAP_10 (0x2) #define MAP_11 (0x3) #define S3C64XX_CMD_MAP_SHIFT 24 #define S5PC100_CMD_MAP_SHIFT 26 #define S3C6400_FBA_SHIFT 10 #define S3C6400_FPA_SHIFT 4 #define S3C6400_FSA_SHIFT 2 #define S3C6410_FBA_SHIFT 12 #define S3C6410_FPA_SHIFT 6 #define S3C6410_FSA_SHIFT 4 #define S5PC100_FBA_SHIFT 13 #define S5PC100_FPA_SHIFT 7 #define S5PC100_FSA_SHIFT 5 /* S5PC110 specific definitions */ #define S5PC110_DMA_SRC_ADDR 0x400 #define S5PC110_DMA_SRC_CFG 0x404 #define S5PC110_DMA_DST_ADDR 0x408 #define 
S5PC110_DMA_DST_CFG 0x40C #define S5PC110_DMA_TRANS_SIZE 0x414 #define S5PC110_DMA_TRANS_CMD 0x418 #define S5PC110_DMA_TRANS_STATUS 0x41C #define S5PC110_DMA_TRANS_DIR 0x420 #define S5PC110_INTC_DMA_CLR 0x1004 #define S5PC110_INTC_ONENAND_CLR 0x1008 #define S5PC110_INTC_DMA_MASK 0x1024 #define S5PC110_INTC_ONENAND_MASK 0x1028 #define S5PC110_INTC_DMA_PEND 0x1044 #define S5PC110_INTC_ONENAND_PEND 0x1048 #define S5PC110_INTC_DMA_STATUS 0x1064 #define S5PC110_INTC_ONENAND_STATUS 0x1068 #define S5PC110_INTC_DMA_TD (1 << 24) #define S5PC110_INTC_DMA_TE (1 << 16) #define S5PC110_DMA_CFG_SINGLE (0x0 << 16) #define S5PC110_DMA_CFG_4BURST (0x2 << 16) #define S5PC110_DMA_CFG_8BURST (0x3 << 16) #define S5PC110_DMA_CFG_16BURST (0x4 << 16) #define S5PC110_DMA_CFG_INC (0x0 << 8) #define S5PC110_DMA_CFG_CNT (0x1 << 8) #define S5PC110_DMA_CFG_8BIT (0x0 << 0) #define S5PC110_DMA_CFG_16BIT (0x1 << 0) #define S5PC110_DMA_CFG_32BIT (0x2 << 0) #define S5PC110_DMA_SRC_CFG_READ (S5PC110_DMA_CFG_16BURST | \ S5PC110_DMA_CFG_INC | \ S5PC110_DMA_CFG_16BIT) #define S5PC110_DMA_DST_CFG_READ (S5PC110_DMA_CFG_16BURST | \ S5PC110_DMA_CFG_INC | \ S5PC110_DMA_CFG_32BIT) #define S5PC110_DMA_SRC_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \ S5PC110_DMA_CFG_INC | \ S5PC110_DMA_CFG_32BIT) #define S5PC110_DMA_DST_CFG_WRITE (S5PC110_DMA_CFG_16BURST | \ S5PC110_DMA_CFG_INC | \ S5PC110_DMA_CFG_16BIT) #define S5PC110_DMA_TRANS_CMD_TDC (0x1 << 18) #define S5PC110_DMA_TRANS_CMD_TEC (0x1 << 16) #define S5PC110_DMA_TRANS_CMD_TR (0x1 << 0) #define S5PC110_DMA_TRANS_STATUS_TD (0x1 << 18) #define S5PC110_DMA_TRANS_STATUS_TB (0x1 << 17) #define S5PC110_DMA_TRANS_STATUS_TE (0x1 << 16) #define S5PC110_DMA_DIR_READ 0x0 #define S5PC110_DMA_DIR_WRITE 0x1 struct s3c_onenand { struct mtd_info *mtd; struct platform_device *pdev; enum soc_type type; void __iomem *base; struct resource *base_res; void __iomem *ahb_addr; struct resource *ahb_res; int bootram_command; void __iomem *page_buf; void __iomem *oob_buf; unsigned int 
(*mem_addr)(int fba, int fpa, int fsa); unsigned int (*cmd_map)(unsigned int type, unsigned int val); void __iomem *dma_addr; struct resource *dma_res; unsigned long phys_base; struct completion complete; struct mtd_partition *parts; }; #define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) #define CMD_MAP_01(dev, mem_addr) (dev->cmd_map(MAP_01, (mem_addr))) #define CMD_MAP_10(dev, mem_addr) (dev->cmd_map(MAP_10, (mem_addr))) #define CMD_MAP_11(dev, addr) (dev->cmd_map(MAP_11, ((addr) << 2))) static struct s3c_onenand *onenand; static const char *part_probes[] = { "cmdlinepart", NULL, }; static inline int s3c_read_reg(int offset) { return readl(onenand->base + offset); } static inline void s3c_write_reg(int value, int offset) { writel(value, onenand->base + offset); } static inline int s3c_read_cmd(unsigned int cmd) { return readl(onenand->ahb_addr + cmd); } static inline void s3c_write_cmd(int value, unsigned int cmd) { writel(value, onenand->ahb_addr + cmd); } #ifdef SAMSUNG_DEBUG static void s3c_dump_reg(void) { int i; for (i = 0; i < 0x400; i += 0x40) { printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n", (unsigned int) onenand->base + i, s3c_read_reg(i), s3c_read_reg(i + 0x10), s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30)); } } #endif static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val) { return (type << S3C64XX_CMD_MAP_SHIFT) | val; } static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val) { return (type << S5PC100_CMD_MAP_SHIFT) | val; } static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa) { return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) | (fsa << S3C6400_FSA_SHIFT); } static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa) { return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) | (fsa << S3C6410_FSA_SHIFT); } static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa) { return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) | (fsa << S5PC100_FSA_SHIFT); } static 
void s3c_onenand_reset(void) { unsigned long timeout = 0x10000; int stat; s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET); while (1 && timeout--) { stat = s3c_read_reg(INT_ERR_STAT_OFFSET); if (stat & RST_CMP) break; } stat = s3c_read_reg(INT_ERR_STAT_OFFSET); s3c_write_reg(stat, INT_ERR_ACK_OFFSET); /* Clear interrupt */ s3c_write_reg(0x0, INT_ERR_ACK_OFFSET); /* Clear the ECC status */ s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET); } static unsigned short s3c_onenand_readw(void __iomem *addr) { struct onenand_chip *this = onenand->mtd->priv; struct device *dev = &onenand->pdev->dev; int reg = addr - this->base; int word_addr = reg >> 1; int value; /* It's used for probing time */ switch (reg) { case ONENAND_REG_MANUFACTURER_ID: return s3c_read_reg(MANUFACT_ID_OFFSET); case ONENAND_REG_DEVICE_ID: return s3c_read_reg(DEVICE_ID_OFFSET); case ONENAND_REG_VERSION_ID: return s3c_read_reg(FLASH_VER_ID_OFFSET); case ONENAND_REG_DATA_BUFFER_SIZE: return s3c_read_reg(DATA_BUF_SIZE_OFFSET); case ONENAND_REG_TECHNOLOGY: return s3c_read_reg(TECH_OFFSET); case ONENAND_REG_SYS_CFG1: return s3c_read_reg(MEM_CFG_OFFSET); /* Used at unlock all status */ case ONENAND_REG_CTRL_STATUS: return 0; case ONENAND_REG_WP_STATUS: return ONENAND_WP_US; default: break; } /* BootRAM access control */ if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) { if (word_addr == 0) return s3c_read_reg(MANUFACT_ID_OFFSET); if (word_addr == 1) return s3c_read_reg(DEVICE_ID_OFFSET); if (word_addr == 2) return s3c_read_reg(FLASH_VER_ID_OFFSET); } value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff; dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__, word_addr, value); return value; } static void s3c_onenand_writew(unsigned short value, void __iomem *addr) { struct onenand_chip *this = onenand->mtd->priv; struct device *dev = &onenand->pdev->dev; unsigned int reg = addr - this->base; unsigned int word_addr = reg >> 1; /* It's used for probing time */ switch 
(reg) { case ONENAND_REG_SYS_CFG1: s3c_write_reg(value, MEM_CFG_OFFSET); return; case ONENAND_REG_START_ADDRESS1: case ONENAND_REG_START_ADDRESS2: return; /* Lock/lock-tight/unlock/unlock_all */ case ONENAND_REG_START_BLOCK_ADDRESS: return; default: break; } /* BootRAM access control */ if ((unsigned int)addr < ONENAND_DATARAM) { if (value == ONENAND_CMD_READID) { onenand->bootram_command = 1; return; } if (value == ONENAND_CMD_RESET) { s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET); onenand->bootram_command = 0; return; } } dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__, word_addr, value); s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr)); } static int s3c_onenand_wait(struct mtd_info *mtd, int state) { struct device *dev = &onenand->pdev->dev; unsigned int flags = INT_ACT; unsigned int stat, ecc; unsigned long timeout; switch (state) { case FL_READING: flags |= BLK_RW_CMP | LOAD_CMP; break; case FL_WRITING: flags |= BLK_RW_CMP | PGM_CMP; break; case FL_ERASING: flags |= BLK_RW_CMP | ERS_CMP; break; case FL_LOCKING: flags |= BLK_RW_CMP; break; default: break; } /* The 20 msec is enough */ timeout = jiffies + msecs_to_jiffies(20); while (time_before(jiffies, timeout)) { stat = s3c_read_reg(INT_ERR_STAT_OFFSET); if (stat & flags) break; if (state != FL_READING) cond_resched(); } /* To get correct interrupt status in timeout case */ stat = s3c_read_reg(INT_ERR_STAT_OFFSET); s3c_write_reg(stat, INT_ERR_ACK_OFFSET); /* * In the Spec. 
it checks the controller status first * However if you get the correct information in case of * power off recovery (POR) test, it should read ECC status first */ if (stat & LOAD_CMP) { ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET); if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) { dev_info(dev, "%s: ECC error = 0x%04x\n", __func__, ecc); mtd->ecc_stats.failed++; return -EBADMSG; } } if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) { dev_info(dev, "%s: controller error = 0x%04x\n", __func__, stat); if (stat & LOCKED_BLK) dev_info(dev, "%s: it's locked error = 0x%04x\n", __func__, stat); return -EIO; } return 0; } static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len) { struct onenand_chip *this = mtd->priv; unsigned int *m, *s; int fba, fpa, fsa = 0; unsigned int mem_addr, cmd_map_01, cmd_map_10; int i, mcount, scount; int index; fba = (int) (addr >> this->erase_shift); fpa = (int) (addr >> this->page_shift); fpa &= this->page_mask; mem_addr = onenand->mem_addr(fba, fpa, fsa); cmd_map_01 = CMD_MAP_01(onenand, mem_addr); cmd_map_10 = CMD_MAP_10(onenand, mem_addr); switch (cmd) { case ONENAND_CMD_READ: case ONENAND_CMD_READOOB: case ONENAND_CMD_BUFFERRAM: ONENAND_SET_NEXT_BUFFERRAM(this); default: break; } index = ONENAND_CURRENT_BUFFERRAM(this); /* * Emulate Two BufferRAMs and access with 4 bytes pointer */ m = (unsigned int *) onenand->page_buf; s = (unsigned int *) onenand->oob_buf; if (index) { m += (this->writesize >> 2); s += (mtd->oobsize >> 2); } mcount = mtd->writesize >> 2; scount = mtd->oobsize >> 2; switch (cmd) { case ONENAND_CMD_READ: /* Main */ for (i = 0; i < mcount; i++) *m++ = s3c_read_cmd(cmd_map_01); return 0; case ONENAND_CMD_READOOB: s3c_write_reg(TSRF, TRANS_SPARE_OFFSET); /* Main */ for (i = 0; i < mcount; i++) *m++ = s3c_read_cmd(cmd_map_01); /* Spare */ for (i = 0; i < scount; i++) *s++ = s3c_read_cmd(cmd_map_01); s3c_write_reg(0, TRANS_SPARE_OFFSET); return 0; case ONENAND_CMD_PROG: /* Main */ for (i = 0; 
i < mcount; i++) s3c_write_cmd(*m++, cmd_map_01); return 0; case ONENAND_CMD_PROGOOB: s3c_write_reg(TSRF, TRANS_SPARE_OFFSET); /* Main - dummy write */ for (i = 0; i < mcount; i++) s3c_write_cmd(0xffffffff, cmd_map_01); /* Spare */ for (i = 0; i < scount; i++) s3c_write_cmd(*s++, cmd_map_01); s3c_write_reg(0, TRANS_SPARE_OFFSET); return 0; case ONENAND_CMD_UNLOCK_ALL: s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10); return 0; case ONENAND_CMD_ERASE: s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10); return 0; default: break; } return 0; } static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area) { struct onenand_chip *this = mtd->priv; int index = ONENAND_CURRENT_BUFFERRAM(this); unsigned char *p; if (area == ONENAND_DATARAM) { p = (unsigned char *) onenand->page_buf; if (index == 1) p += this->writesize; } else { p = (unsigned char *) onenand->oob_buf; if (index == 1) p += mtd->oobsize; } return p; } static int onenand_read_bufferram(struct mtd_info *mtd, int area, unsigned char *buffer, int offset, size_t count) { unsigned char *p; p = s3c_get_bufferram(mtd, area); memcpy(buffer, p + offset, count); return 0; } static int onenand_write_bufferram(struct mtd_info *mtd, int area, const unsigned char *buffer, int offset, size_t count) { unsigned char *p; p = s3c_get_bufferram(mtd, area); memcpy(p + offset, buffer, count); return 0; } static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction); static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction) { void __iomem *base = onenand->dma_addr; int status; unsigned long timeout; writel(src, base + S5PC110_DMA_SRC_ADDR); writel(dst, base + S5PC110_DMA_DST_ADDR); if (direction == S5PC110_DMA_DIR_READ) { writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG); writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG); } else { writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG); writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG); } 
writel(count, base + S5PC110_DMA_TRANS_SIZE); writel(direction, base + S5PC110_DMA_TRANS_DIR); writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD); /* * There's no exact timeout values at Spec. * In real case it takes under 1 msec. * So 20 msecs are enough. */ timeout = jiffies + msecs_to_jiffies(20); do { status = readl(base + S5PC110_DMA_TRANS_STATUS); if (status & S5PC110_DMA_TRANS_STATUS_TE) { writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD); return -EIO; } } while (!(status & S5PC110_DMA_TRANS_STATUS_TD) && time_before(jiffies, timeout)); writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); return 0; } static irqreturn_t s5pc110_onenand_irq(int irq, void *data) { void __iomem *base = onenand->dma_addr; int status, cmd = 0; status = readl(base + S5PC110_INTC_DMA_STATUS); if (likely(status & S5PC110_INTC_DMA_TD)) cmd = S5PC110_DMA_TRANS_CMD_TDC; if (unlikely(status & S5PC110_INTC_DMA_TE)) cmd = S5PC110_DMA_TRANS_CMD_TEC; writel(cmd, base + S5PC110_DMA_TRANS_CMD); writel(status, base + S5PC110_INTC_DMA_CLR); if (!onenand->complete.done) complete(&onenand->complete); return IRQ_HANDLED; } static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction) { void __iomem *base = onenand->dma_addr; int status; status = readl(base + S5PC110_INTC_DMA_MASK); if (status) { status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE); writel(status, base + S5PC110_INTC_DMA_MASK); } writel(src, base + S5PC110_DMA_SRC_ADDR); writel(dst, base + S5PC110_DMA_DST_ADDR); if (direction == S5PC110_DMA_DIR_READ) { writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG); writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG); } else { writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG); writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG); } writel(count, base + S5PC110_DMA_TRANS_SIZE); writel(direction, base + S5PC110_DMA_TRANS_DIR); writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD); 
wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20)); return 0; } static int s5pc110_read_bufferram(struct mtd_info *mtd, int area, unsigned char *buffer, int offset, size_t count) { struct onenand_chip *this = mtd->priv; void __iomem *p; void *buf = (void *) buffer; dma_addr_t dma_src, dma_dst; int err, ofs, page_dma = 0; struct device *dev = &onenand->pdev->dev; p = this->base + area; if (ONENAND_CURRENT_BUFFERRAM(this)) { if (area == ONENAND_DATARAM) p += this->writesize; else p += mtd->oobsize; } if (offset & 3 || (size_t) buf & 3 || !onenand->dma_addr || count != mtd->writesize) goto normal; /* Handle vmalloc address */ if (buf >= high_memory) { struct page *page; if (((size_t) buf & PAGE_MASK) != ((size_t) (buf + count - 1) & PAGE_MASK)) goto normal; page = vmalloc_to_page(buf); if (!page) goto normal; /* Page offset */ ofs = ((size_t) buf & ~PAGE_MASK); page_dma = 1; /* DMA routine */ dma_src = onenand->phys_base + (p - this->base); dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE); } else { /* DMA routine */ dma_src = onenand->phys_base + (p - this->base); dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE); } if (dma_mapping_error(dev, dma_dst)) { dev_err(dev, "Couldn't map a %d byte buffer for DMA\n", count); goto normal; } err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src, count, S5PC110_DMA_DIR_READ); if (page_dma) dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE); else dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE); if (!err) return 0; normal: if (count != mtd->writesize) { /* Copy the bufferram to memory to prevent unaligned access */ memcpy(this->page_buf, p, mtd->writesize); p = this->page_buf + offset; } memcpy(buffer, p, count); return 0; } static int s5pc110_chip_probe(struct mtd_info *mtd) { /* Now just return 0 */ return 0; } static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state) { unsigned int flags = INT_ACT | LOAD_CMP; unsigned int stat; unsigned long timeout; /* The 20 msec is 
enough */ timeout = jiffies + msecs_to_jiffies(20); while (time_before(jiffies, timeout)) { stat = s3c_read_reg(INT_ERR_STAT_OFFSET); if (stat & flags) break; } /* To get correct interrupt status in timeout case */ stat = s3c_read_reg(INT_ERR_STAT_OFFSET); s3c_write_reg(stat, INT_ERR_ACK_OFFSET); if (stat & LD_FAIL_ECC_ERR) { s3c_onenand_reset(); return ONENAND_BBT_READ_ERROR; } if (stat & LOAD_CMP) { int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET); if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) { s3c_onenand_reset(); return ONENAND_BBT_READ_ERROR; } } return 0; } static void s3c_onenand_check_lock_status(struct mtd_info *mtd) { struct onenand_chip *this = mtd->priv; struct device *dev = &onenand->pdev->dev; unsigned int block, end; int tmp; end = this->chipsize >> this->erase_shift; for (block = 0; block < end; block++) { unsigned int mem_addr = onenand->mem_addr(block, 0, 0); tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr)); if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) { dev_err(dev, "block %d is write-protected!\n", block); s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET); } } } static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd) { struct onenand_chip *this = mtd->priv; int start, end, start_mem_addr, end_mem_addr; start = ofs >> this->erase_shift; start_mem_addr = onenand->mem_addr(start, 0, 0); end = start + (len >> this->erase_shift) - 1; end_mem_addr = onenand->mem_addr(end, 0, 0); if (cmd == ONENAND_CMD_LOCK) { s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand, start_mem_addr)); s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand, end_mem_addr)); } else { s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand, start_mem_addr)); s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand, end_mem_addr)); } this->wait(mtd, FL_LOCKING); } static void s3c_unlock_all(struct mtd_info *mtd) { struct onenand_chip *this = mtd->priv; loff_t ofs = 0; size_t len = this->chipsize; if (this->options & ONENAND_HAS_UNLOCK_ALL) { /* Write 
unlock command */ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0); /* No need to check return value */ this->wait(mtd, FL_LOCKING); /* Workaround for all block unlock in DDP */ if (!ONENAND_IS_DDP(this)) { s3c_onenand_check_lock_status(mtd); return; } /* All blocks on another chip */ ofs = this->chipsize >> 1; len = this->chipsize >> 1; } s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK); s3c_onenand_check_lock_status(mtd); } static void s3c_onenand_setup(struct mtd_info *mtd) { struct onenand_chip *this = mtd->priv; onenand->mtd = mtd; if (onenand->type == TYPE_S3C6400) { onenand->mem_addr = s3c6400_mem_addr; onenand->cmd_map = s3c64xx_cmd_map; } else if (onenand->type == TYPE_S3C6410) { onenand->mem_addr = s3c6410_mem_addr; onenand->cmd_map = s3c64xx_cmd_map; } else if (onenand->type == TYPE_S5PC100) { onenand->mem_addr = s5pc100_mem_addr; onenand->cmd_map = s5pc1xx_cmd_map; } else if (onenand->type == TYPE_S5PC110) { /* Use generic onenand functions */ this->read_bufferram = s5pc110_read_bufferram; this->chip_probe = s5pc110_chip_probe; return; } else { BUG(); } this->read_word = s3c_onenand_readw; this->write_word = s3c_onenand_writew; this->wait = s3c_onenand_wait; this->bbt_wait = s3c_onenand_bbt_wait; this->unlock_all = s3c_unlock_all; this->command = s3c_onenand_command; this->read_bufferram = onenand_read_bufferram; this->write_bufferram = onenand_write_bufferram; } static int s3c_onenand_probe(struct platform_device *pdev) { struct onenand_platform_data *pdata; struct onenand_chip *this; struct mtd_info *mtd; struct resource *r; int size, err; pdata = pdev->dev.platform_data; /* No need to check pdata. 
the platform data is optional */ size = sizeof(struct mtd_info) + sizeof(struct onenand_chip); mtd = kzalloc(size, GFP_KERNEL); if (!mtd) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL); if (!onenand) { err = -ENOMEM; goto onenand_fail; } this = (struct onenand_chip *) &mtd[1]; mtd->priv = this; mtd->dev.parent = &pdev->dev; mtd->owner = THIS_MODULE; onenand->pdev = pdev; onenand->type = platform_get_device_id(pdev)->driver_data; s3c_onenand_setup(mtd); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(&pdev->dev, "no memory resource defined\n"); return -ENOENT; goto ahb_resource_failed; } onenand->base_res = request_mem_region(r->start, resource_size(r), pdev->name); if (!onenand->base_res) { dev_err(&pdev->dev, "failed to request memory resource\n"); err = -EBUSY; goto resource_failed; } onenand->base = ioremap(r->start, resource_size(r)); if (!onenand->base) { dev_err(&pdev->dev, "failed to map memory resource\n"); err = -EFAULT; goto ioremap_failed; } /* Set onenand_chip also */ this->base = onenand->base; /* Use runtime badblock check */ this->options |= ONENAND_SKIP_UNLOCK_CHECK; if (onenand->type != TYPE_S5PC110) { r = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!r) { dev_err(&pdev->dev, "no buffer memory resource defined\n"); return -ENOENT; goto ahb_resource_failed; } onenand->ahb_res = request_mem_region(r->start, resource_size(r), pdev->name); if (!onenand->ahb_res) { dev_err(&pdev->dev, "failed to request buffer memory resource\n"); err = -EBUSY; goto ahb_resource_failed; } onenand->ahb_addr = ioremap(r->start, resource_size(r)); if (!onenand->ahb_addr) { dev_err(&pdev->dev, "failed to map buffer memory resource\n"); err = -EINVAL; goto ahb_ioremap_failed; } /* Allocate 4KiB BufferRAM */ onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL); if (!onenand->page_buf) { err = -ENOMEM; goto page_buf_fail; } /* Allocate 128 SpareRAM */ onenand->oob_buf = 
kzalloc(128, GFP_KERNEL); if (!onenand->oob_buf) { err = -ENOMEM; goto oob_buf_fail; } /* S3C doesn't handle subpage write */ mtd->subpage_sft = 0; this->subpagesize = mtd->writesize; } else { /* S5PC110 */ r = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!r) { dev_err(&pdev->dev, "no dma memory resource defined\n"); return -ENOENT; goto dma_resource_failed; } onenand->dma_res = request_mem_region(r->start, resource_size(r), pdev->name); if (!onenand->dma_res) { dev_err(&pdev->dev, "failed to request dma memory resource\n"); err = -EBUSY; goto dma_resource_failed; } onenand->dma_addr = ioremap(r->start, resource_size(r)); if (!onenand->dma_addr) { dev_err(&pdev->dev, "failed to map dma memory resource\n"); err = -EINVAL; goto dma_ioremap_failed; } onenand->phys_base = onenand->base_res->start; s5pc110_dma_ops = s5pc110_dma_poll; /* Interrupt support */ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (r) { init_completion(&onenand->complete); s5pc110_dma_ops = s5pc110_dma_irq; err = request_irq(r->start, s5pc110_onenand_irq, IRQF_SHARED, "onenand", &onenand); if (err) { dev_err(&pdev->dev, "failed to get irq\n"); goto scan_failed; } } } if (onenand_scan(mtd, 1)) { err = -EFAULT; goto scan_failed; } if (onenand->type != TYPE_S5PC110) { /* S3C doesn't handle subpage write */ mtd->subpage_sft = 0; this->subpagesize = mtd->writesize; } if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) dev_info(&onenand->pdev->dev, "OneNAND Sync. 
Burst Read enabled\n"); err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0); if (err > 0) mtd_device_register(mtd, onenand->parts, err); else if (err <= 0 && pdata && pdata->parts) mtd_device_register(mtd, pdata->parts, pdata->nr_parts); else err = mtd_device_register(mtd, NULL, 0); platform_set_drvdata(pdev, mtd); return 0; scan_failed: if (onenand->dma_addr) iounmap(onenand->dma_addr); dma_ioremap_failed: if (onenand->dma_res) release_mem_region(onenand->dma_res->start, resource_size(onenand->dma_res)); kfree(onenand->oob_buf); oob_buf_fail: kfree(onenand->page_buf); page_buf_fail: if (onenand->ahb_addr) iounmap(onenand->ahb_addr); ahb_ioremap_failed: if (onenand->ahb_res) release_mem_region(onenand->ahb_res->start, resource_size(onenand->ahb_res)); dma_resource_failed: ahb_resource_failed: iounmap(onenand->base); ioremap_failed: if (onenand->base_res) release_mem_region(onenand->base_res->start, resource_size(onenand->base_res)); resource_failed: kfree(onenand); onenand_fail: kfree(mtd); return err; } static int __devexit s3c_onenand_remove(struct platform_device *pdev) { struct mtd_info *mtd = platform_get_drvdata(pdev); onenand_release(mtd); if (onenand->ahb_addr) iounmap(onenand->ahb_addr); if (onenand->ahb_res) release_mem_region(onenand->ahb_res->start, resource_size(onenand->ahb_res)); if (onenand->dma_addr) iounmap(onenand->dma_addr); if (onenand->dma_res) release_mem_region(onenand->dma_res->start, resource_size(onenand->dma_res)); iounmap(onenand->base); release_mem_region(onenand->base_res->start, resource_size(onenand->base_res)); platform_set_drvdata(pdev, NULL); kfree(onenand->oob_buf); kfree(onenand->page_buf); kfree(onenand); kfree(mtd); return 0; } static int s3c_pm_ops_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mtd_info *mtd = platform_get_drvdata(pdev); struct onenand_chip *this = mtd->priv; this->wait(mtd, FL_PM_SUSPENDED); return 0; } static int s3c_pm_ops_resume(struct device 
*dev) { struct platform_device *pdev = to_platform_device(dev); struct mtd_info *mtd = platform_get_drvdata(pdev); struct onenand_chip *this = mtd->priv; this->unlock_all(mtd); return 0; } static const struct dev_pm_ops s3c_pm_ops = { .suspend = s3c_pm_ops_suspend, .resume = s3c_pm_ops_resume, }; static struct platform_device_id s3c_onenand_driver_ids[] = { { .name = "s3c6400-onenand", .driver_data = TYPE_S3C6400, }, { .name = "s3c6410-onenand", .driver_data = TYPE_S3C6410, }, { .name = "s5pc100-onenand", .driver_data = TYPE_S5PC100, }, { .name = "s5pc110-onenand", .driver_data = TYPE_S5PC110, }, { }, }; MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids); static struct platform_driver s3c_onenand_driver = { .driver = { .name = "samsung-onenand", .pm = &s3c_pm_ops, }, .id_table = s3c_onenand_driver_ids, .probe = s3c_onenand_probe, .remove = __devexit_p(s3c_onenand_remove), }; static int __init s3c_onenand_init(void) { return platform_driver_register(&s3c_onenand_driver); } static void __exit s3c_onenand_exit(void) { platform_driver_unregister(&s3c_onenand_driver); } module_init(s3c_onenand_init); module_exit(s3c_onenand_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>"); MODULE_DESCRIPTION("Samsung OneNAND controller support");
gpl-2.0
CyanogenMod/android_kernel_toshiba_betelgeuse
drivers/isdn/hardware/avm/t1pci.c
3426
6811
/* $Id: t1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
 *
 * Module for AVM T1 PCI-card.
 *
 * Copyright 1999 by Carsten Paeth <calle@calle.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/capi.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capilli.h>
#include "avmcard.h"

#undef CONFIG_T1PCI_DEBUG
#undef CONFIG_T1PCI_POLLDEBUG

/* ------------------------------------------------------------- */

static char *revision = "$Revision: 1.1.2.2 $";

/* ------------------------------------------------------------- */

static struct pci_device_id t1pci_pci_tbl[] = {
	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, PCI_ANY_ID, PCI_ANY_ID },
	{ }				/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, t1pci_pci_tbl);
MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 PCI card");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");

/* ------------------------------------------------------------- */

static char *t1pci_procinfo(struct capi_ctr *ctrl);

/*
 * Allocate the avmcard state for one T1 PCI card, claim its I/O and
 * memory resources, detect/reset the hardware, hook the shared b1dma
 * interrupt and register the CAPI controller.  On any failure the goto
 * chain below releases everything acquired so far.
 */
static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
{
	avmcard *card;
	avmctrl_info *cinfo;
	int retval;

	card = b1_alloc_card(1);
	if (!card) {
		printk(KERN_WARNING "t1pci: no memory.\n");
		retval = -ENOMEM;
		goto err;
	}

	/* 2048+128 byte send and receive DMA buffers (b1dma convention) */
	card->dma = avmcard_dma_alloc("t1pci", pdev, 2048+128, 2048+128);
	if (!card->dma) {
		printk(KERN_WARNING "t1pci: no memory.\n");
		retval = -ENOMEM;
		goto err_free;
	}

	cinfo = card->ctrlinfo;
	sprintf(card->name, "t1pci-%x", p->port);
	card->port = p->port;
	card->irq = p->irq;
	card->membase = p->membase;
	card->cardtype = avm_t1pci;

	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
		printk(KERN_WARNING "t1pci: ports 0x%03x-0x%03x in use.\n",
		       card->port, card->port + AVMB1_PORTLEN);
		retval = -EBUSY;
		goto err_free_dma;
	}

	card->mbase = ioremap(card->membase, 64);
	if (!card->mbase) {
		printk(KERN_NOTICE "t1pci: can't remap memory at 0x%lx\n",
		       card->membase);
		retval = -EIO;
		goto err_release_region;
	}

	b1dma_reset(card);

	/*
	 * t1pci_detect() distinguishes "no card" (codes < 6) from
	 * "card present but T1 cable/power missing" (codes >= 6).
	 */
	retval = t1pci_detect(card);
	if (retval != 0) {
		if (retval < 6)
			printk(KERN_NOTICE "t1pci: NO card at 0x%x (%d)\n",
			       card->port, retval);
		else
			printk(KERN_NOTICE "t1pci: card at 0x%x, but cable not connected or T1 has no power (%d)\n",
			       card->port, retval);
		retval = -EIO;
		goto err_unmap;
	}
	b1dma_reset(card);

	retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
	if (retval) {
		printk(KERN_ERR "t1pci: unable to get IRQ %d.\n", card->irq);
		retval = -EBUSY;
		goto err_unmap;
	}

	/* All controller operations are delegated to the shared b1dma core */
	cinfo->capi_ctrl.owner = THIS_MODULE;
	cinfo->capi_ctrl.driver_name   = "t1pci";
	cinfo->capi_ctrl.driverdata    = cinfo;
	cinfo->capi_ctrl.register_appl = b1dma_register_appl;
	cinfo->capi_ctrl.release_appl  = b1dma_release_appl;
	cinfo->capi_ctrl.send_message  = b1dma_send_message;
	cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
	cinfo->capi_ctrl.reset_ctr     = b1dma_reset_ctr;
	cinfo->capi_ctrl.procinfo      = t1pci_procinfo;
	cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
	strcpy(cinfo->capi_ctrl.name, card->name);

	retval = attach_capi_ctr(&cinfo->capi_ctrl);
	if (retval) {
		printk(KERN_ERR "t1pci: attach controller failed.\n");
		retval = -EBUSY;
		goto err_free_irq;
	}
	card->cardnr = cinfo->capi_ctrl.cnr;

	printk(KERN_INFO "t1pci: AVM T1 PCI at i/o %#x, irq %d, mem %#lx\n",
	       card->port, card->irq, card->membase);

	pci_set_drvdata(pdev, card);
	return 0;

 err_free_irq:
	free_irq(card->irq, card);
 err_unmap:
	iounmap(card->mbase);
 err_release_region:
	release_region(card->port, AVMB1_PORTLEN);
 err_free_dma:
	avmcard_dma_free(card->dma);
 err_free:
	b1_free_card(card);
 err:
	return retval;
}

/* ------------------------------------------------------------- */

/* Undo t1pci_add_card() when the PCI device goes away. */
static void t1pci_remove(struct pci_dev *pdev)
{
	avmcard *card = pci_get_drvdata(pdev);
	avmctrl_info *cinfo = card->ctrlinfo;

	b1dma_reset(card);

	detach_capi_ctr(&cinfo->capi_ctrl);
	free_irq(card->irq, card);
	iounmap(card->mbase);
	release_region(card->port, AVMB1_PORTLEN);
	avmcard_dma_free(card->dma);
	b1_free_card(card);
}

/* ------------------------------------------------------------- */

/* Fill cinfo->infobuf with a one-line /proc summary for this controller. */
static char *t1pci_procinfo(struct capi_ctr *ctrl)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);

	if (!cinfo)
		return "";
	sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
		cinfo->cardname[0] ? cinfo->cardname : "-",
		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
		cinfo->card ? cinfo->card->port : 0x0,
		cinfo->card ? cinfo->card->irq : 0,
		cinfo->card ? cinfo->card->membase : 0
		);
	return cinfo->infobuf;
}

/* ------------------------------------------------------------- */

/*
 * PCI probe: enable the device, read its BARs (BAR1 = I/O ports,
 * BAR0 = memory window) and hand off to t1pci_add_card().
 */
static int __devinit t1pci_probe(struct pci_dev *dev,
				 const struct pci_device_id *ent)
{
	struct capicardparams param;
	int retval;

	if (pci_enable_device(dev) < 0) {
		printk(KERN_ERR	"t1pci: failed to enable AVM-T1-PCI\n");
		return -ENODEV;
	}
	pci_set_master(dev);

	param.port = pci_resource_start(dev, 1);
	param.irq = dev->irq;
	param.membase = pci_resource_start(dev, 0);

	printk(KERN_INFO "t1pci: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
	       param.port, param.irq, param.membase);

	retval = t1pci_add_card(&param, dev);
	if (retval != 0) {
		printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
		       param.port, param.irq, param.membase);
		pci_disable_device(dev);
		return -ENODEV;
	}
	return 0;
}

static struct pci_driver t1pci_pci_driver = {
	.name		= "t1pci",
	.id_table	= t1pci_pci_tbl,
	.probe		= t1pci_probe,
	.remove		= t1pci_remove,
};

static struct capi_driver capi_driver_t1pci = {
	.name		= "t1pci",
	.revision	= "1.0",
};

/*
 * Module init: extract the CVS revision string for display, register
 * the PCI driver and, on success, the CAPI driver.
 */
static int __init t1pci_init(void)
{
	char *p;
	char rev[32];
	int err;

	if ((p = strchr(revision, ':')) != NULL && p[1]) {
		strlcpy(rev, p + 2, 32);
		if ((p = strchr(rev, '$')) != NULL && p > rev)
			*(p-1) = 0;
	} else
		strcpy(rev, "1.0");

	err = pci_register_driver(&t1pci_pci_driver);
	if (!err) {
		strlcpy(capi_driver_t1pci.revision, rev, 32);
		register_capi_driver(&capi_driver_t1pci);
		printk(KERN_INFO "t1pci: revision %s\n", rev);
	}
	return err;
}

static void __exit t1pci_exit(void)
{
	unregister_capi_driver(&capi_driver_t1pci);
	pci_unregister_driver(&t1pci_pci_driver);
}

module_init(t1pci_init);
module_exit(t1pci_exit);
gpl-2.0
bluechiptechnology/Rx3_linux_3.0.35_4.0.0
drivers/isdn/hardware/eicon/divasi.c
3426
12326
/* $Id: divasi.c,v 1.25.6.2 2005/01/31 12:22:20 armin Exp $
 *
 * Driver for Eicon DIVA Server ISDN cards.
 * User Mode IDI Interface
 *
 * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
 * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

#include "platform.h"
#include "di_defs.h"
#include "divasync.h"
#include "um_xdi.h"
#include "um_idi.h"

static char *main_revision = "$Revision: 1.25.6.2 $";

static int major;		/* chrdev major, assigned dynamically */

MODULE_DESCRIPTION("User IDI Interface for Eicon ISDN cards");
MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
MODULE_SUPPORTED_DEVICE("DIVA card driver");
MODULE_LICENSE("GPL");

/* Per-open-file state attached to each user-mode IDI entity. */
typedef struct _diva_um_idi_os_context {
	wait_queue_head_t read_wait;
	wait_queue_head_t close_wait;
	struct timer_list diva_timer_id;
	int aborted;		/* set when the adapter went away */
	int adapter_nr;		/* adapter selected by first write() */
} diva_um_idi_os_context_t;

static char *DRIVERNAME = "Eicon DIVA - User IDI (http://www.melware.net)";
static char *DRIVERLNAME = "diva_idi";
static char *DEVNAME = "DivasIDI";
char *DRIVERRELEASE_IDI = "2.0";

extern int idifunc_init(void);
extern void idifunc_finit(void);

/*
 *  helper functions
 */
/* Strip a CVS "$Revision: x.y $" string down to "x.y" (mutates input). */
static char *getrev(const char *revision)
{
	char *rev;
	char *p;
	if ((p = strchr(revision, ':'))) {
		rev = p + 2;
		p = strchr(rev, '$');
		*--p = 0;
	} else
		rev = "1.0";
	return rev;
}

/*
 *  LOCALS
 */
static ssize_t um_idi_read(struct file *file, char __user *buf, size_t count,
			   loff_t * offset);
static ssize_t um_idi_write(struct file *file, const char __user *buf,
			    size_t count, loff_t * offset);
static unsigned int um_idi_poll(struct file *file, poll_table * wait);
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
static void diva_um_timer_function(unsigned long data);

/*
 *  proc entry
 */
extern struct proc_dir_entry *proc_net_eicon;
static struct proc_dir_entry *um_idi_proc_entry = NULL;

/* Emit the driver identification block for /proc/net/eicon/diva_idi. */
static int um_idi_proc_show(struct seq_file *m, void *v)
{
	char tmprev[32];

	seq_printf(m, "%s\n", DRIVERNAME);
	seq_printf(m, "name     : %s\n", DRIVERLNAME);
	seq_printf(m, "release  : %s\n", DRIVERRELEASE_IDI);
	strcpy(tmprev, main_revision);
	seq_printf(m, "revision : %s\n", getrev(tmprev));
	seq_printf(m, "build    : %s\n", DIVA_BUILD);
	seq_printf(m, "major    : %d\n", major);

	return 0;
}

static int um_idi_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, um_idi_proc_show, NULL);
}

static const struct file_operations um_idi_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= um_idi_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Returns 1 on success, 0 on failure (note: inverted vs. errno style). */
static int DIVA_INIT_FUNCTION create_um_idi_proc(void)
{
	um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
					&um_idi_proc_fops);
	if (!um_idi_proc_entry)
		return (0);
	return (1);
}

static void remove_um_idi_proc(void)
{
	if (um_idi_proc_entry) {
		remove_proc_entry(DRIVERLNAME, proc_net_eicon);
		um_idi_proc_entry = NULL;
	}
}

static const struct file_operations divas_idi_fops = {
	.owner   = THIS_MODULE,
	.llseek  = no_llseek,
	.read    = um_idi_read,
	.write   = um_idi_write,
	.poll    = um_idi_poll,
	.open    = um_idi_open,
	.release = um_idi_release
};

static void divas_idi_unregister_chrdev(void)
{
	unregister_chrdev(major, DEVNAME);
}

/* Returns 1 on success, 0 on failure; stores the dynamic major. */
static int DIVA_INIT_FUNCTION divas_idi_register_chrdev(void)
{
	if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0)
	{
		printk(KERN_ERR "%s: failed to create /dev entry.\n",
		       DRIVERLNAME);
		return (0);
	}

	return (1);
}

/*
** Driver Load
*/
static int DIVA_INIT_FUNCTION divasi_init(void)
{
	char tmprev[50];
	int ret = 0;

	printk(KERN_INFO "%s\n", DRIVERNAME);
	printk(KERN_INFO "%s: Rel:%s  Rev:", DRIVERLNAME, DRIVERRELEASE_IDI);
	strcpy(tmprev, main_revision);
	printk("%s  Build: %s\n", getrev(tmprev), DIVA_BUILD);

	if (!divas_idi_register_chrdev()) {
		ret = -EIO;
		goto out;
	}

	if (!create_um_idi_proc()) {
		divas_idi_unregister_chrdev();
		printk(KERN_ERR "%s: failed to create proc entry.\n",
		       DRIVERLNAME);
		ret = -EIO;
		goto out;
	}

	if (!(idifunc_init())) {
		remove_um_idi_proc();
		divas_idi_unregister_chrdev();
		printk(KERN_ERR "%s: failed to connect to DIDD.\n",
		       DRIVERLNAME);
		ret = -EIO;
		goto out;
	}
	printk(KERN_INFO "%s: started with major %d\n", DRIVERLNAME, major);

      out:
	return (ret);
}


/*
** Driver Unload
*/
static void DIVA_EXIT_FUNCTION divasi_exit(void)
{
	idifunc_finit();
	remove_um_idi_proc();
	divas_idi_unregister_chrdev();

	printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
}

module_init(divasi_init);
module_exit(divasi_exit);


/*
 *  FILE OPERATIONS
 */

/* Copy callback handed to the IDI core; runs on a kernel buffer. */
static int
divas_um_idi_copy_to_user(void *os_handle, void *dst, const void *src,
			  int length)
{
	memcpy(dst, src, length);
	return (length);
}

/* read(): fetch one queued IDI message into a bounce buffer, then to user. */
static ssize_t
um_idi_read(struct file *file, char __user *buf, size_t count, loff_t * offset)
{
	diva_um_idi_os_context_t *p_os;
	int ret = -EINVAL;
	void *data;

	if (!file->private_data) {
		return (-ENODEV);
	}

	if (!
(p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file-> private_data))) { return (-ENODEV); } if (p_os->aborted) { return (-ENODEV); } if (!(data = diva_os_malloc(0, count))) { return (-ENOMEM); } ret = diva_um_idi_read(file->private_data, file, data, count, divas_um_idi_copy_to_user); switch (ret) { case 0: /* no message available */ ret = (-EAGAIN); break; case (-1): /* adapter was removed */ ret = (-ENODEV); break; case (-2): /* message_length > length of user buffer */ ret = (-EFAULT); break; } if (ret > 0) { if (copy_to_user(buf, data, ret)) { ret = (-EFAULT); } } diva_os_free(0, data); DBG_TRC(("read: ret %d", ret)); return (ret); } static int divas_um_idi_copy_from_user(void *os_handle, void *dst, const void *src, int length) { memcpy(dst, src, length); return (length); } static int um_idi_open_adapter(struct file *file, int adapter_nr) { diva_um_idi_os_context_t *p_os; void *e = divas_um_idi_create_entity((dword) adapter_nr, (void *) file); if (!(file->private_data = e)) { return (0); } p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e); init_waitqueue_head(&p_os->read_wait); init_waitqueue_head(&p_os->close_wait); init_timer(&p_os->diva_timer_id); p_os->diva_timer_id.function = (void *) diva_um_timer_function; p_os->diva_timer_id.data = (unsigned long) p_os; p_os->aborted = 0; p_os->adapter_nr = adapter_nr; return (1); } static ssize_t um_idi_write(struct file *file, const char __user *buf, size_t count, loff_t * offset) { diva_um_idi_os_context_t *p_os; int ret = -EINVAL; void *data; int adapter_nr = 0; if (!file->private_data) { /* the first write() selects the adapter_nr */ if (count == sizeof(int)) { if (copy_from_user ((void *) &adapter_nr, buf, count)) return (-EFAULT); if (!(um_idi_open_adapter(file, adapter_nr))) return (-ENODEV); return (count); } else return (-ENODEV); } if (!(p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file-> private_data))) { return (-ENODEV); } if (p_os->aborted) { return 
(-ENODEV); } if (!(data = diva_os_malloc(0, count))) { return (-ENOMEM); } if (copy_from_user(data, buf, count)) { ret = -EFAULT; } else { ret = diva_um_idi_write(file->private_data, file, data, count, divas_um_idi_copy_from_user); switch (ret) { case 0: /* no space available */ ret = (-EAGAIN); break; case (-1): /* adapter was removed */ ret = (-ENODEV); break; case (-2): /* length of user buffer > max message_length */ ret = (-EFAULT); break; } } diva_os_free(0, data); DBG_TRC(("write: ret %d", ret)); return (ret); } static unsigned int um_idi_poll(struct file *file, poll_table * wait) { diva_um_idi_os_context_t *p_os; if (!file->private_data) { return (POLLERR); } if ((!(p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file->private_data))) || p_os->aborted) { return (POLLERR); } poll_wait(file, &p_os->read_wait, wait); if (p_os->aborted) { return (POLLERR); } switch (diva_user_mode_idi_ind_ready(file->private_data, file)) { case (-1): return (POLLERR); case 0: return (0); } return (POLLIN | POLLRDNORM); } static int um_idi_open(struct inode *inode, struct file *file) { return (0); } static int um_idi_release(struct inode *inode, struct file *file) { diva_um_idi_os_context_t *p_os; unsigned int adapter_nr; int ret = 0; if (!(file->private_data)) { ret = -ENODEV; goto out; } if (!(p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file->private_data))) { ret = -ENODEV; goto out; } adapter_nr = p_os->adapter_nr; if ((ret = remove_entity(file->private_data))) { goto out; } if (divas_um_idi_delete_entity ((int) adapter_nr, file->private_data)) { ret = -ENODEV; goto out; } out: return (ret); } int diva_os_get_context_size(void) { return (sizeof(diva_um_idi_os_context_t)); } void diva_os_wakeup_read(void *os_context) { diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) os_context; wake_up_interruptible(&p_os->read_wait); } void diva_os_wakeup_close(void *os_context) { diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) 
os_context; wake_up_interruptible(&p_os->close_wait); } static void diva_um_timer_function(unsigned long data) { diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) data; p_os->aborted = 1; wake_up_interruptible(&p_os->read_wait); wake_up_interruptible(&p_os->close_wait); DBG_ERR(("entity removal watchdog")) } /* ** If application exits without entity removal this function will remove ** entity and block until removal is complete */ static int remove_entity(void *entity) { struct task_struct *curtask = current; diva_um_idi_os_context_t *p_os; diva_um_idi_stop_wdog(entity); if (!entity) { DBG_FTL(("Zero entity on remove")) return (0); } if (!(p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(entity))) { DBG_FTL(("Zero entity os context on remove")) return (0); } if (!divas_um_idi_entity_assigned(entity) || p_os->aborted) { /* Entity is not assigned, also can be removed */ return (0); } DBG_TRC(("E(%08x) check remove", entity)) /* If adapter not answers on remove request inside of 10 Sec, then adapter is dead */ diva_um_idi_start_wdog(entity); { DECLARE_WAITQUEUE(wait, curtask); add_wait_queue(&p_os->close_wait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (!divas_um_idi_entity_start_remove(entity) || p_os->aborted) { break; } schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&p_os->close_wait, &wait); } DBG_TRC(("E(%08x) start remove", entity)) { DECLARE_WAITQUEUE(wait, curtask); add_wait_queue(&p_os->close_wait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (!divas_um_idi_entity_assigned(entity) || p_os->aborted) { break; } schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&p_os->close_wait, &wait); } DBG_TRC(("E(%08x) remove complete, aborted:%d", entity, p_os->aborted)) diva_um_idi_stop_wdog(entity); p_os->aborted = 0; return (0); } /* * timer watchdog */ void diva_um_idi_start_wdog(void *entity) { diva_um_idi_os_context_t *p_os; if (entity && ((p_os = (diva_um_idi_os_context_t *) 
diva_um_id_get_os_context(entity)))) { mod_timer(&p_os->diva_timer_id, jiffies + 10 * HZ); } } void diva_um_idi_stop_wdog(void *entity) { diva_um_idi_os_context_t *p_os; if (entity && ((p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(entity)))) { del_timer(&p_os->diva_timer_id); } }
gpl-2.0
zeroblade1984/Yureka-64bit
drivers/char/hw_random/tpm-rng.c
4706
1396
/* * Copyright (C) 2012 Kent Yoder IBM Corporation * * HWRNG interfaces to pull RNG data from a TPM * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/hw_random.h> #include <linux/tpm.h> #define MODULE_NAME "tpm-rng" static int tpm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { return tpm_get_random(TPM_ANY_NUM, data, max); } static struct hwrng tpm_rng = { .name = MODULE_NAME, .read = tpm_rng_read, }; static int __init rng_init(void) { return hwrng_register(&tpm_rng); } module_init(rng_init); static void __exit rng_exit(void) { hwrng_unregister(&tpm_rng); } module_exit(rng_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Kent Yoder <key@linux.vnet.ibm.com>"); MODULE_DESCRIPTION("RNG driver for TPM devices");
gpl-2.0
Forzaferrarileo/fuji-msm8660-JB-3.4.0
arch/arm/mach-kirkwood/ts41x-setup.c
4962
4602
/*
 *
 * QNAP TS-410, TS-410U, TS-419P and TS-419U Turbo NAS Board Setup
 *
 * Copyright (C) 2009-2010  Martin Michlmayr <tbm@cyrius.com>
 * Copyright (C) 2008  Byron Bradley <byron.bbradley@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
#include "common.h"
#include "mpp.h"
#include "tsx1x-common.h"

/* for the PCIe reset workaround */
#include <plat/pcie.h>

/* GPIO line wired to jumper JP1 (LCD vs. serial console selection). */
#define QNAP_TS41X_JUMPER_JP1	45

/* On-board RTC chip, reachable over I2C bus 0. */
static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
	I2C_BOARD_INFO("s35390a", 0x30),
};

/* Ethernet port 0 PHY address (overridden below for MV88F6282 SoCs). */
static struct mv643xx_eth_platform_data qnap_ts41x_ge00_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
};

/* Ethernet port 1 PHY address (overridden below for MV88F6282 SoCs). */
static struct mv643xx_eth_platform_data qnap_ts41x_ge01_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR(0),
};

/* Two on-SoC SATA ports. */
static struct mv_sata_platform_data qnap_ts41x_sata_data = {
	.n_ports	= 2,
};

/* Front-panel buttons; both are active-low GPIO inputs. */
static struct gpio_keys_button qnap_ts41x_buttons[] = {
	{
		.code		= KEY_COPY,
		.gpio		= 43,
		.desc		= "USB Copy",
		.active_low	= 1,
	},
	{
		.code		= KEY_RESTART,
		.gpio		= 37,
		.desc		= "Reset",
		.active_low	= 1,
	},
};

static struct gpio_keys_platform_data qnap_ts41x_button_data = {
	.buttons	= qnap_ts41x_buttons,
	.nbuttons	= ARRAY_SIZE(qnap_ts41x_buttons),
};

/* gpio-keys platform device wrapping the button table above. */
static struct platform_device qnap_ts41x_button_device = {
	.name		= "gpio-keys",
	.id		= -1,
	.num_resources	= 0,
	.dev		= {
		.platform_data	= &qnap_ts41x_button_data,
	}
};

/* Multi-purpose pin configuration; list is zero-terminated. */
static unsigned int qnap_ts41x_mpp_config[] __initdata = {
	MPP0_SPI_SCn,
	MPP1_SPI_MOSI,
	MPP2_SPI_SCK,
	MPP3_SPI_MISO,
	MPP6_SYSRST_OUTn,
	MPP7_PEX_RST_OUTn,
	MPP8_TW0_SDA,
	MPP9_TW0_SCK,
	MPP10_UART0_TXD,
	MPP11_UART0_RXD,
	MPP13_UART1_TXD,	/* PIC controller */
	MPP14_UART1_RXD,	/* PIC controller */
	MPP15_SATA0_ACTn,
	MPP16_SATA1_ACTn,
	MPP20_GE1_TXD0,
	MPP21_GE1_TXD1,
	MPP22_GE1_TXD2,
	MPP23_GE1_TXD3,
	MPP24_GE1_RXD0,
	MPP25_GE1_RXD1,
	MPP26_GE1_RXD2,
	MPP27_GE1_RXD3,
	MPP30_GE1_RXCTL,
	MPP31_GE1_RXCLK,
	MPP32_GE1_TCLKOUT,
	MPP33_GE1_TXCTL,
	MPP36_GPIO,		/* RAM: 0: 256 MB, 1: 512 MB */
	MPP37_GPIO,		/* Reset button */
	MPP43_GPIO,		/* USB Copy button */
	MPP44_GPIO,		/* Board ID: 0: TS-419U, 1: TS-419 */
	MPP45_GPIO,		/* JP1: 0: LCD, 1: serial console */
	MPP46_GPIO,		/* External SATA HDD1 error indicator */
	MPP47_GPIO,		/* External SATA HDD2 error indicator */
	MPP48_GPIO,		/* External SATA HDD3 error indicator */
	MPP49_GPIO,		/* External SATA HDD4 error indicator */
	0
};

/*
 * Machine init: bring up the SoC, pin mux, UARTs, flash, I2C/RTC,
 * both Ethernet ports, SATA, USB and the front-panel buttons, then
 * hook up the power-off handler and export the JP1 jumper GPIO.
 */
static void __init qnap_ts41x_init(void)
{
	u32 dev, rev;

	/*
	 * Basic setup. Needs to be called early.
	 */
	kirkwood_init();
	kirkwood_mpp_conf(qnap_ts41x_mpp_config);

	kirkwood_uart0_init();
	kirkwood_uart1_init(); /* A PIC controller is connected here. */
	qnap_tsx1x_register_flash();
	kirkwood_i2c_init();
	i2c_register_board_info(0, &qnap_ts41x_i2c_rtc, 1);

	/* MV88F6282 boards use different PHY addresses for both ports. */
	kirkwood_pcie_id(&dev, &rev);
	if (dev == MV88F6282_DEV_ID) {
		qnap_ts41x_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
		qnap_ts41x_ge01_data.phy_addr = MV643XX_ETH_PHY_ADDR(1);
	}
	kirkwood_ge00_init(&qnap_ts41x_ge00_data);
	kirkwood_ge01_init(&qnap_ts41x_ge01_data);

	kirkwood_sata_init(&qnap_ts41x_sata_data);
	kirkwood_ehci_init();
	platform_device_register(&qnap_ts41x_button_device);

	pm_power_off = qnap_tsx1x_power_off;

	/* Export JP1 read-only so userspace can see the console setting. */
	if (gpio_request(QNAP_TS41X_JUMPER_JP1, "JP1") == 0)
		gpio_export(QNAP_TS41X_JUMPER_JP1, 0);
}

/* PCIe bring-up; runs at subsys_initcall time, after machine init. */
static int __init ts41x_pci_init(void)
{
	if (machine_is_ts41x()) {
		u32 dev, rev;

		/*
		 * Without this explicit reset, the PCIe SATA controller
		 * (Marvell 88sx7042/sata_mv) is known to stop working
		 * after a few minutes.
		 */
		orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);

		kirkwood_pcie_id(&dev, &rev);
		if (dev == MV88F6282_DEV_ID)
			kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
		else
			kirkwood_pcie_init(KW_PCIE0);
	}
	return 0;
}
subsys_initcall(ts41x_pci_init);

MACHINE_START(TS41X, "QNAP TS-41x")
	/* Maintainer: Martin Michlmayr <tbm@cyrius.com> */
	.atag_offset	= 0x100,
	.init_machine	= qnap_ts41x_init,
	.map_io		= kirkwood_map_io,
	.init_early	= kirkwood_init_early,
	.init_irq	= kirkwood_init_irq,
	.timer		= &kirkwood_timer,
	.restart	= kirkwood_restart,
MACHINE_END
gpl-2.0
IMCG/fastsocket
kernel/arch/um/kernel/sigio.c
4962
1043
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include <linux/interrupt.h>
#include "irq_kern.h"
#include "os.h"
#include "sigio.h"

/* Protected by sigio_lock() called from write_sigio_workaround */
static int sigio_irq_fd = -1;

/*
 * IRQ handler for the write-sigio workaround pipe: drain the single
 * notification byte and re-arm the descriptor for the next event.
 */
static irqreturn_t sigio_interrupt(int irq, void *data)
{
	char ch;

	os_read_file(sigio_irq_fd, &ch, sizeof(ch));
	reactivate_fd(sigio_irq_fd, SIGIO_WRITE_IRQ);
	return IRQ_HANDLED;
}

/*
 * Wire @fd up to the SIGIO write IRQ and remember it for the handler.
 * Returns 0 on success, -1 if the IRQ could not be requested.
 */
int write_sigio_irq(int fd)
{
	int ret = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ,
				 sigio_interrupt,
				 IRQF_DISABLED|IRQF_SAMPLE_RANDOM,
				 "write sigio", NULL);

	if (ret) {
		printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
		       "err = %d\n", ret);
		return -1;
	}
	sigio_irq_fd = fd;
	return 0;
}

/* These are called from os-Linux/sigio.c to protect its pollfds arrays. */
static DEFINE_SPINLOCK(sigio_spinlock);

/* Acquire the pollfds protection lock. */
void sigio_lock(void)
{
	spin_lock(&sigio_spinlock);
}

/* Release the pollfds protection lock. */
void sigio_unlock(void)
{
	spin_unlock(&sigio_spinlock);
}
gpl-2.0
moresushant48/android_kernel_cyanogen_msm8916
lib/hweight.c
7522
1911
#include <linux/export.h>
#include <linux/bitops.h>
#include <asm/types.h>

/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

/*
 * 32-bit population count.  The fast-multiplier variant folds the four
 * per-byte counts together with one multiply; the generic variant
 * finishes with shifts and adds only.
 */
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef ARCH_HAS_FAST_MULTIPLIER
	w -= (w >> 1) & 0x55555555;
	w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w =  (w + (w >> 4)) & 0x0f0f0f0f;
	return (w * 0x01010101) >> 24;
#else
	unsigned int x = w - ((w >> 1) & 0x55555555);	/* 2-bit sums */
	x = (x & 0x33333333) + ((x >> 2) & 0x33333333);	/* 4-bit sums */
	x = (x + (x >> 4)) & 0x0F0F0F0F;		/* 8-bit sums */
	x += x >> 8;
	x += x >> 16;
	return x & 0x000000FF;
#endif
}
EXPORT_SYMBOL(__sw_hweight32);

/* 16-bit population count: same shift/mask reduction on a half word. */
unsigned int __sw_hweight16(unsigned int w)
{
	unsigned int x = w - ((w >> 1) & 0x5555);
	x = (x & 0x3333) + ((x >> 2) & 0x3333);
	x = (x + (x >> 4)) & 0x0F0F;
	return (x + (x >> 8)) & 0x00FF;
}
EXPORT_SYMBOL(__sw_hweight16);

/* 8-bit population count. */
unsigned int __sw_hweight8(unsigned int w)
{
	unsigned int x = w - ((w >> 1) & 0x55);
	x = (x & 0x33) + ((x >> 2) & 0x33);
	return (x + (x >> 4)) & 0x0F;
}
EXPORT_SYMBOL(__sw_hweight8);

/*
 * 64-bit population count.  On 32-bit kernels it is computed as the sum
 * of two 32-bit counts; on 64-bit kernels the same reduction runs over
 * the full word, with or without the multiply shortcut.
 */
unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
	return __sw_hweight32((unsigned int)(w >> 32)) +
	       __sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef ARCH_HAS_FAST_MULTIPLIER
	w -= (w >> 1) & 0x5555555555555555ul;
	w =  (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
	w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
	return (w * 0x0101010101010101ul) >> 56;
#else
	__u64 x = w - ((w >> 1) & 0x5555555555555555ul);
	x = (x & 0x3333333333333333ul) + ((x >> 2) & 0x3333333333333333ul);
	x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	x += x >> 8;
	x += x >> 16;
	return (x + (x >> 32)) & 0x00000000000000FFul;
#endif
#endif
}
EXPORT_SYMBOL(__sw_hweight64);
gpl-2.0
SamsungGalaxyS6/kernel_samsung_exynos7420
lib/hweight.c
7522
1911
#include <linux/export.h>
#include <linux/bitops.h>
#include <asm/types.h>

/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

/*
 * 32-bit population count.  The fast-multiplier variant folds the four
 * per-byte counts together with one multiply; the generic variant
 * finishes with shifts and adds only.
 */
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef ARCH_HAS_FAST_MULTIPLIER
	w -= (w >> 1) & 0x55555555;
	w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w =  (w + (w >> 4)) & 0x0f0f0f0f;
	return (w * 0x01010101) >> 24;
#else
	unsigned int x = w - ((w >> 1) & 0x55555555);	/* 2-bit sums */
	x = (x & 0x33333333) + ((x >> 2) & 0x33333333);	/* 4-bit sums */
	x = (x + (x >> 4)) & 0x0F0F0F0F;		/* 8-bit sums */
	x += x >> 8;
	x += x >> 16;
	return x & 0x000000FF;
#endif
}
EXPORT_SYMBOL(__sw_hweight32);

/* 16-bit population count: same shift/mask reduction on a half word. */
unsigned int __sw_hweight16(unsigned int w)
{
	unsigned int x = w - ((w >> 1) & 0x5555);
	x = (x & 0x3333) + ((x >> 2) & 0x3333);
	x = (x + (x >> 4)) & 0x0F0F;
	return (x + (x >> 8)) & 0x00FF;
}
EXPORT_SYMBOL(__sw_hweight16);

/* 8-bit population count. */
unsigned int __sw_hweight8(unsigned int w)
{
	unsigned int x = w - ((w >> 1) & 0x55);
	x = (x & 0x33) + ((x >> 2) & 0x33);
	return (x + (x >> 4)) & 0x0F;
}
EXPORT_SYMBOL(__sw_hweight8);

/*
 * 64-bit population count.  On 32-bit kernels it is computed as the sum
 * of two 32-bit counts; on 64-bit kernels the same reduction runs over
 * the full word, with or without the multiply shortcut.
 */
unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
	return __sw_hweight32((unsigned int)(w >> 32)) +
	       __sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef ARCH_HAS_FAST_MULTIPLIER
	w -= (w >> 1) & 0x5555555555555555ul;
	w =  (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
	w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
	return (w * 0x0101010101010101ul) >> 56;
#else
	__u64 x = w - ((w >> 1) & 0x5555555555555555ul);
	x = (x & 0x3333333333333333ul) + ((x >> 2) & 0x3333333333333333ul);
	x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	x += x >> 8;
	x += x >> 16;
	return (x + (x >> 32)) & 0x00000000000000FFul;
#endif
#endif
}
EXPORT_SYMBOL(__sw_hweight64);
gpl-2.0
ea4862/ArchiKernel_cm12.1
sound/pci/pcxhr/pcxhr_core.c
8034
37111
/* * Driver for Digigram pcxhr compatible soundcards * * low level interface with interrupt and message handling implementation * * Copyright (c) 2004 by Digigram <alsa@digigram.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/delay.h> #include <linux/firmware.h> #include <linux/interrupt.h> #include <asm/io.h> #include <sound/core.h> #include "pcxhr.h" #include "pcxhr_mixer.h" #include "pcxhr_hwdep.h" #include "pcxhr_core.h" /* registers used on the PLX (port 1) */ #define PCXHR_PLX_OFFSET_MIN 0x40 #define PCXHR_PLX_MBOX0 0x40 #define PCXHR_PLX_MBOX1 0x44 #define PCXHR_PLX_MBOX2 0x48 #define PCXHR_PLX_MBOX3 0x4C #define PCXHR_PLX_MBOX4 0x50 #define PCXHR_PLX_MBOX5 0x54 #define PCXHR_PLX_MBOX6 0x58 #define PCXHR_PLX_MBOX7 0x5C #define PCXHR_PLX_L2PCIDB 0x64 #define PCXHR_PLX_IRQCS 0x68 #define PCXHR_PLX_CHIPSC 0x6C /* registers used on the DSP (port 2) */ #define PCXHR_DSP_ICR 0x00 #define PCXHR_DSP_CVR 0x04 #define PCXHR_DSP_ISR 0x08 #define PCXHR_DSP_IVR 0x0C #define PCXHR_DSP_RXH 0x14 #define PCXHR_DSP_TXH 0x14 #define PCXHR_DSP_RXM 0x18 #define PCXHR_DSP_TXM 0x18 #define PCXHR_DSP_RXL 0x1C #define PCXHR_DSP_TXL 0x1C #define PCXHR_DSP_RESET 0x20 #define PCXHR_DSP_OFFSET_MAX 0x20 /* access to the card */ #define PCXHR_PLX 1 #define PCXHR_DSP 2 #if (PCXHR_DSP_OFFSET_MAX > PCXHR_PLX_OFFSET_MIN) #undef 
PCXHR_REG_TO_PORT(x) #else #define PCXHR_REG_TO_PORT(x) ((x)>PCXHR_DSP_OFFSET_MAX ? PCXHR_PLX : PCXHR_DSP) #endif #define PCXHR_INPB(mgr,x) inb((mgr)->port[PCXHR_REG_TO_PORT(x)] + (x)) #define PCXHR_INPL(mgr,x) inl((mgr)->port[PCXHR_REG_TO_PORT(x)] + (x)) #define PCXHR_OUTPB(mgr,x,data) outb((data), (mgr)->port[PCXHR_REG_TO_PORT(x)] + (x)) #define PCXHR_OUTPL(mgr,x,data) outl((data), (mgr)->port[PCXHR_REG_TO_PORT(x)] + (x)) /* attention : access the PCXHR_DSP_* registers with inb and outb only ! */ /* params used with PCXHR_PLX_MBOX0 */ #define PCXHR_MBOX0_HF5 (1 << 0) #define PCXHR_MBOX0_HF4 (1 << 1) #define PCXHR_MBOX0_BOOT_HERE (1 << 23) /* params used with PCXHR_PLX_IRQCS */ #define PCXHR_IRQCS_ENABLE_PCIIRQ (1 << 8) #define PCXHR_IRQCS_ENABLE_PCIDB (1 << 9) #define PCXHR_IRQCS_ACTIVE_PCIDB (1 << 13) /* params used with PCXHR_PLX_CHIPSC */ #define PCXHR_CHIPSC_INIT_VALUE 0x100D767E #define PCXHR_CHIPSC_RESET_XILINX (1 << 16) #define PCXHR_CHIPSC_GPI_USERI (1 << 17) #define PCXHR_CHIPSC_DATA_CLK (1 << 24) #define PCXHR_CHIPSC_DATA_IN (1 << 26) /* params used with PCXHR_DSP_ICR */ #define PCXHR_ICR_HI08_RREQ 0x01 #define PCXHR_ICR_HI08_TREQ 0x02 #define PCXHR_ICR_HI08_HDRQ 0x04 #define PCXHR_ICR_HI08_HF0 0x08 #define PCXHR_ICR_HI08_HF1 0x10 #define PCXHR_ICR_HI08_HLEND 0x20 #define PCXHR_ICR_HI08_INIT 0x80 /* params used with PCXHR_DSP_CVR */ #define PCXHR_CVR_HI08_HC 0x80 /* params used with PCXHR_DSP_ISR */ #define PCXHR_ISR_HI08_RXDF 0x01 #define PCXHR_ISR_HI08_TXDE 0x02 #define PCXHR_ISR_HI08_TRDY 0x04 #define PCXHR_ISR_HI08_ERR 0x08 #define PCXHR_ISR_HI08_CHK 0x10 #define PCXHR_ISR_HI08_HREQ 0x80 /* constants used for delay in msec */ #define PCXHR_WAIT_DEFAULT 2 #define PCXHR_WAIT_IT 25 #define PCXHR_WAIT_IT_EXTRA 65 /* * pcxhr_check_reg_bit - wait for the specified bit is set/reset on a register * @reg: register to check * @mask: bit mask * @bit: resultant bit to be checked * @time: time-out of loop in msec * * returns zero if a bit matches, or a negative 
error code. */ static int pcxhr_check_reg_bit(struct pcxhr_mgr *mgr, unsigned int reg, unsigned char mask, unsigned char bit, int time, unsigned char* read) { int i = 0; unsigned long end_time = jiffies + (time * HZ + 999) / 1000; do { *read = PCXHR_INPB(mgr, reg); if ((*read & mask) == bit) { if (i > 100) snd_printdd("ATTENTION! check_reg(%x) " "loopcount=%d\n", reg, i); return 0; } i++; } while (time_after_eq(end_time, jiffies)); snd_printk(KERN_ERR "pcxhr_check_reg_bit: timeout, reg=%x, mask=0x%x, val=%x\n", reg, mask, *read); return -EIO; } /* constants used with pcxhr_check_reg_bit() */ #define PCXHR_TIMEOUT_DSP 200 #define PCXHR_MASK_EXTRA_INFO 0x0000FE #define PCXHR_MASK_IT_HF0 0x000100 #define PCXHR_MASK_IT_HF1 0x000200 #define PCXHR_MASK_IT_NO_HF0_HF1 0x000400 #define PCXHR_MASK_IT_MANAGE_HF5 0x000800 #define PCXHR_MASK_IT_WAIT 0x010000 #define PCXHR_MASK_IT_WAIT_EXTRA 0x020000 #define PCXHR_IT_SEND_BYTE_XILINX (0x0000003C | PCXHR_MASK_IT_HF0) #define PCXHR_IT_TEST_XILINX (0x0000003C | PCXHR_MASK_IT_HF1 | \ PCXHR_MASK_IT_MANAGE_HF5) #define PCXHR_IT_DOWNLOAD_BOOT (0x0000000C | PCXHR_MASK_IT_HF1 | \ PCXHR_MASK_IT_MANAGE_HF5 | \ PCXHR_MASK_IT_WAIT) #define PCXHR_IT_RESET_BOARD_FUNC (0x0000000C | PCXHR_MASK_IT_HF0 | \ PCXHR_MASK_IT_MANAGE_HF5 | \ PCXHR_MASK_IT_WAIT_EXTRA) #define PCXHR_IT_DOWNLOAD_DSP (0x0000000C | \ PCXHR_MASK_IT_MANAGE_HF5 | \ PCXHR_MASK_IT_WAIT) #define PCXHR_IT_DEBUG (0x0000005A | PCXHR_MASK_IT_NO_HF0_HF1) #define PCXHR_IT_RESET_SEMAPHORE (0x0000005C | PCXHR_MASK_IT_NO_HF0_HF1) #define PCXHR_IT_MESSAGE (0x00000074 | PCXHR_MASK_IT_NO_HF0_HF1) #define PCXHR_IT_RESET_CHK (0x00000076 | PCXHR_MASK_IT_NO_HF0_HF1) #define PCXHR_IT_UPDATE_RBUFFER (0x00000078 | PCXHR_MASK_IT_NO_HF0_HF1) static int pcxhr_send_it_dsp(struct pcxhr_mgr *mgr, unsigned int itdsp, int atomic) { int err; unsigned char reg; if (itdsp & PCXHR_MASK_IT_MANAGE_HF5) { /* clear hf5 bit */ PCXHR_OUTPL(mgr, PCXHR_PLX_MBOX0, PCXHR_INPL(mgr, PCXHR_PLX_MBOX0) & ~PCXHR_MBOX0_HF5); } 
if ((itdsp & PCXHR_MASK_IT_NO_HF0_HF1) == 0) { reg = (PCXHR_ICR_HI08_RREQ | PCXHR_ICR_HI08_TREQ | PCXHR_ICR_HI08_HDRQ); if (itdsp & PCXHR_MASK_IT_HF0) reg |= PCXHR_ICR_HI08_HF0; if (itdsp & PCXHR_MASK_IT_HF1) reg |= PCXHR_ICR_HI08_HF1; PCXHR_OUTPB(mgr, PCXHR_DSP_ICR, reg); } reg = (unsigned char)(((itdsp & PCXHR_MASK_EXTRA_INFO) >> 1) | PCXHR_CVR_HI08_HC); PCXHR_OUTPB(mgr, PCXHR_DSP_CVR, reg); if (itdsp & PCXHR_MASK_IT_WAIT) { if (atomic) mdelay(PCXHR_WAIT_IT); else msleep(PCXHR_WAIT_IT); } if (itdsp & PCXHR_MASK_IT_WAIT_EXTRA) { if (atomic) mdelay(PCXHR_WAIT_IT_EXTRA); else msleep(PCXHR_WAIT_IT); } /* wait for CVR_HI08_HC == 0 */ err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_CVR, PCXHR_CVR_HI08_HC, 0, PCXHR_TIMEOUT_DSP, &reg); if (err) { snd_printk(KERN_ERR "pcxhr_send_it_dsp : TIMEOUT CVR\n"); return err; } if (itdsp & PCXHR_MASK_IT_MANAGE_HF5) { /* wait for hf5 bit */ err = pcxhr_check_reg_bit(mgr, PCXHR_PLX_MBOX0, PCXHR_MBOX0_HF5, PCXHR_MBOX0_HF5, PCXHR_TIMEOUT_DSP, &reg); if (err) { snd_printk(KERN_ERR "pcxhr_send_it_dsp : TIMEOUT HF5\n"); return err; } } return 0; /* retry not handled here */ } void pcxhr_reset_xilinx_com(struct pcxhr_mgr *mgr) { /* reset second xilinx */ PCXHR_OUTPL(mgr, PCXHR_PLX_CHIPSC, PCXHR_CHIPSC_INIT_VALUE & ~PCXHR_CHIPSC_RESET_XILINX); } static void pcxhr_enable_irq(struct pcxhr_mgr *mgr, int enable) { unsigned int reg = PCXHR_INPL(mgr, PCXHR_PLX_IRQCS); /* enable/disable interrupts */ if (enable) reg |= (PCXHR_IRQCS_ENABLE_PCIIRQ | PCXHR_IRQCS_ENABLE_PCIDB); else reg &= ~(PCXHR_IRQCS_ENABLE_PCIIRQ | PCXHR_IRQCS_ENABLE_PCIDB); PCXHR_OUTPL(mgr, PCXHR_PLX_IRQCS, reg); } void pcxhr_reset_dsp(struct pcxhr_mgr *mgr) { /* disable interrupts */ pcxhr_enable_irq(mgr, 0); /* let's reset the DSP */ PCXHR_OUTPB(mgr, PCXHR_DSP_RESET, 0); msleep( PCXHR_WAIT_DEFAULT ); /* wait 2 msec */ PCXHR_OUTPB(mgr, PCXHR_DSP_RESET, 3); msleep( PCXHR_WAIT_DEFAULT ); /* wait 2 msec */ /* reset mailbox */ PCXHR_OUTPL(mgr, PCXHR_PLX_MBOX0, 0); } void 
pcxhr_enable_dsp(struct pcxhr_mgr *mgr) { /* enable interrupts */ pcxhr_enable_irq(mgr, 1); } /* * load the xilinx image */ int pcxhr_load_xilinx_binary(struct pcxhr_mgr *mgr, const struct firmware *xilinx, int second) { unsigned int i; unsigned int chipsc; unsigned char data; unsigned char mask; const unsigned char *image; /* test first xilinx */ chipsc = PCXHR_INPL(mgr, PCXHR_PLX_CHIPSC); /* REV01 cards do not support the PCXHR_CHIPSC_GPI_USERI bit anymore */ /* this bit will always be 1; * no possibility to test presence of first xilinx */ if(second) { if ((chipsc & PCXHR_CHIPSC_GPI_USERI) == 0) { snd_printk(KERN_ERR "error loading first xilinx\n"); return -EINVAL; } /* activate second xilinx */ chipsc |= PCXHR_CHIPSC_RESET_XILINX; PCXHR_OUTPL(mgr, PCXHR_PLX_CHIPSC, chipsc); msleep( PCXHR_WAIT_DEFAULT ); /* wait 2 msec */ } image = xilinx->data; for (i = 0; i < xilinx->size; i++, image++) { data = *image; mask = 0x80; while (mask) { chipsc &= ~(PCXHR_CHIPSC_DATA_CLK | PCXHR_CHIPSC_DATA_IN); if (data & mask) chipsc |= PCXHR_CHIPSC_DATA_IN; PCXHR_OUTPL(mgr, PCXHR_PLX_CHIPSC, chipsc); chipsc |= PCXHR_CHIPSC_DATA_CLK; PCXHR_OUTPL(mgr, PCXHR_PLX_CHIPSC, chipsc); mask >>= 1; } /* don't take too much time in this loop... 
*/ cond_resched(); } chipsc &= ~(PCXHR_CHIPSC_DATA_CLK | PCXHR_CHIPSC_DATA_IN); PCXHR_OUTPL(mgr, PCXHR_PLX_CHIPSC, chipsc); /* wait 2 msec (time to boot the xilinx before any access) */ msleep( PCXHR_WAIT_DEFAULT ); return 0; } /* * send an executable file to the DSP */ static int pcxhr_download_dsp(struct pcxhr_mgr *mgr, const struct firmware *dsp) { int err; unsigned int i; unsigned int len; const unsigned char *data; unsigned char dummy; /* check the length of boot image */ if (dsp->size <= 0) return -EINVAL; if (dsp->size % 3) return -EINVAL; if (snd_BUG_ON(!dsp->data)) return -EINVAL; /* transfert data buffer from PC to DSP */ for (i = 0; i < dsp->size; i += 3) { data = dsp->data + i; if (i == 0) { /* test data header consistency */ len = (unsigned int)((data[0]<<16) + (data[1]<<8) + data[2]); if (len && (dsp->size != (len + 2) * 3)) return -EINVAL; } /* wait DSP ready for new transfer */ err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_TRDY, PCXHR_ISR_HI08_TRDY, PCXHR_TIMEOUT_DSP, &dummy); if (err) { snd_printk(KERN_ERR "dsp loading error at position %d\n", i); return err; } /* send host data */ PCXHR_OUTPB(mgr, PCXHR_DSP_TXH, data[0]); PCXHR_OUTPB(mgr, PCXHR_DSP_TXM, data[1]); PCXHR_OUTPB(mgr, PCXHR_DSP_TXL, data[2]); /* don't take too much time in this loop... 
*/ cond_resched(); } /* give some time to boot the DSP */ msleep(PCXHR_WAIT_DEFAULT); return 0; } /* * load the eeprom image */ int pcxhr_load_eeprom_binary(struct pcxhr_mgr *mgr, const struct firmware *eeprom) { int err; unsigned char reg; /* init value of the ICR register */ reg = PCXHR_ICR_HI08_RREQ | PCXHR_ICR_HI08_TREQ | PCXHR_ICR_HI08_HDRQ; if (PCXHR_INPL(mgr, PCXHR_PLX_MBOX0) & PCXHR_MBOX0_BOOT_HERE) { /* no need to load the eeprom binary, * but init the HI08 interface */ PCXHR_OUTPB(mgr, PCXHR_DSP_ICR, reg | PCXHR_ICR_HI08_INIT); msleep(PCXHR_WAIT_DEFAULT); PCXHR_OUTPB(mgr, PCXHR_DSP_ICR, reg); msleep(PCXHR_WAIT_DEFAULT); snd_printdd("no need to load eeprom boot\n"); return 0; } PCXHR_OUTPB(mgr, PCXHR_DSP_ICR, reg); err = pcxhr_download_dsp(mgr, eeprom); if (err) return err; /* wait for chk bit */ return pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_CHK, PCXHR_ISR_HI08_CHK, PCXHR_TIMEOUT_DSP, &reg); } /* * load the boot image */ int pcxhr_load_boot_binary(struct pcxhr_mgr *mgr, const struct firmware *boot) { int err; unsigned int physaddr = mgr->hostport.addr; unsigned char dummy; /* send the hostport address to the DSP (only the upper 24 bit !) 
*/ if (snd_BUG_ON(physaddr & 0xff)) return -EINVAL; PCXHR_OUTPL(mgr, PCXHR_PLX_MBOX1, (physaddr >> 8)); err = pcxhr_send_it_dsp(mgr, PCXHR_IT_DOWNLOAD_BOOT, 0); if (err) return err; /* clear hf5 bit */ PCXHR_OUTPL(mgr, PCXHR_PLX_MBOX0, PCXHR_INPL(mgr, PCXHR_PLX_MBOX0) & ~PCXHR_MBOX0_HF5); err = pcxhr_download_dsp(mgr, boot); if (err) return err; /* wait for hf5 bit */ return pcxhr_check_reg_bit(mgr, PCXHR_PLX_MBOX0, PCXHR_MBOX0_HF5, PCXHR_MBOX0_HF5, PCXHR_TIMEOUT_DSP, &dummy); } /* * load the final dsp image */ int pcxhr_load_dsp_binary(struct pcxhr_mgr *mgr, const struct firmware *dsp) { int err; unsigned char dummy; err = pcxhr_send_it_dsp(mgr, PCXHR_IT_RESET_BOARD_FUNC, 0); if (err) return err; err = pcxhr_send_it_dsp(mgr, PCXHR_IT_DOWNLOAD_DSP, 0); if (err) return err; err = pcxhr_download_dsp(mgr, dsp); if (err) return err; /* wait for chk bit */ return pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_CHK, PCXHR_ISR_HI08_CHK, PCXHR_TIMEOUT_DSP, &dummy); } struct pcxhr_cmd_info { u32 opcode; /* command word */ u16 st_length; /* status length */ u16 st_type; /* status type (RMH_SSIZE_XXX) */ }; /* RMH status type */ enum { RMH_SSIZE_FIXED = 0, /* status size fix (st_length = 0..x) */ RMH_SSIZE_ARG = 1, /* status size given in the LSB byte */ RMH_SSIZE_MASK = 2, /* status size given in bitmask */ }; /* * Array of DSP commands */ static struct pcxhr_cmd_info pcxhr_dsp_cmds[] = { [CMD_VERSION] = { 0x010000, 1, RMH_SSIZE_FIXED }, [CMD_SUPPORTED] = { 0x020000, 4, RMH_SSIZE_FIXED }, [CMD_TEST_IT] = { 0x040000, 1, RMH_SSIZE_FIXED }, [CMD_SEND_IRQA] = { 0x070001, 0, RMH_SSIZE_FIXED }, [CMD_ACCESS_IO_WRITE] = { 0x090000, 1, RMH_SSIZE_ARG }, [CMD_ACCESS_IO_READ] = { 0x094000, 1, RMH_SSIZE_ARG }, [CMD_ASYNC] = { 0x0a0000, 1, RMH_SSIZE_ARG }, [CMD_MODIFY_CLOCK] = { 0x0d0000, 0, RMH_SSIZE_FIXED }, [CMD_RESYNC_AUDIO_INPUTS] = { 0x0e0000, 0, RMH_SSIZE_FIXED }, [CMD_GET_DSP_RESOURCES] = { 0x100000, 4, RMH_SSIZE_FIXED }, [CMD_SET_TIMER_INTERRUPT] = { 0x110000, 0, 
RMH_SSIZE_FIXED }, [CMD_RES_PIPE] = { 0x400000, 0, RMH_SSIZE_FIXED }, [CMD_FREE_PIPE] = { 0x410000, 0, RMH_SSIZE_FIXED }, [CMD_CONF_PIPE] = { 0x422101, 0, RMH_SSIZE_FIXED }, [CMD_STOP_PIPE] = { 0x470004, 0, RMH_SSIZE_FIXED }, [CMD_PIPE_SAMPLE_COUNT] = { 0x49a000, 2, RMH_SSIZE_FIXED }, [CMD_CAN_START_PIPE] = { 0x4b0000, 1, RMH_SSIZE_FIXED }, [CMD_START_STREAM] = { 0x802000, 0, RMH_SSIZE_FIXED }, [CMD_STREAM_OUT_LEVEL_ADJUST] = { 0x822000, 0, RMH_SSIZE_FIXED }, [CMD_STOP_STREAM] = { 0x832000, 0, RMH_SSIZE_FIXED }, [CMD_UPDATE_R_BUFFERS] = { 0x840000, 0, RMH_SSIZE_FIXED }, [CMD_FORMAT_STREAM_OUT] = { 0x860000, 0, RMH_SSIZE_FIXED }, [CMD_FORMAT_STREAM_IN] = { 0x870000, 0, RMH_SSIZE_FIXED }, [CMD_STREAM_SAMPLE_COUNT] = { 0x902000, 2, RMH_SSIZE_FIXED }, [CMD_AUDIO_LEVEL_ADJUST] = { 0xc22000, 0, RMH_SSIZE_FIXED }, }; #ifdef CONFIG_SND_DEBUG_VERBOSE static char* cmd_names[] = { [CMD_VERSION] = "CMD_VERSION", [CMD_SUPPORTED] = "CMD_SUPPORTED", [CMD_TEST_IT] = "CMD_TEST_IT", [CMD_SEND_IRQA] = "CMD_SEND_IRQA", [CMD_ACCESS_IO_WRITE] = "CMD_ACCESS_IO_WRITE", [CMD_ACCESS_IO_READ] = "CMD_ACCESS_IO_READ", [CMD_ASYNC] = "CMD_ASYNC", [CMD_MODIFY_CLOCK] = "CMD_MODIFY_CLOCK", [CMD_RESYNC_AUDIO_INPUTS] = "CMD_RESYNC_AUDIO_INPUTS", [CMD_GET_DSP_RESOURCES] = "CMD_GET_DSP_RESOURCES", [CMD_SET_TIMER_INTERRUPT] = "CMD_SET_TIMER_INTERRUPT", [CMD_RES_PIPE] = "CMD_RES_PIPE", [CMD_FREE_PIPE] = "CMD_FREE_PIPE", [CMD_CONF_PIPE] = "CMD_CONF_PIPE", [CMD_STOP_PIPE] = "CMD_STOP_PIPE", [CMD_PIPE_SAMPLE_COUNT] = "CMD_PIPE_SAMPLE_COUNT", [CMD_CAN_START_PIPE] = "CMD_CAN_START_PIPE", [CMD_START_STREAM] = "CMD_START_STREAM", [CMD_STREAM_OUT_LEVEL_ADJUST] = "CMD_STREAM_OUT_LEVEL_ADJUST", [CMD_STOP_STREAM] = "CMD_STOP_STREAM", [CMD_UPDATE_R_BUFFERS] = "CMD_UPDATE_R_BUFFERS", [CMD_FORMAT_STREAM_OUT] = "CMD_FORMAT_STREAM_OUT", [CMD_FORMAT_STREAM_IN] = "CMD_FORMAT_STREAM_IN", [CMD_STREAM_SAMPLE_COUNT] = "CMD_STREAM_SAMPLE_COUNT", [CMD_AUDIO_LEVEL_ADJUST] = "CMD_AUDIO_LEVEL_ADJUST", }; #endif static int 
pcxhr_read_rmh_status(struct pcxhr_mgr *mgr, struct pcxhr_rmh *rmh) { int err; int i; u32 data; u32 size_mask; unsigned char reg; int max_stat_len; if (rmh->stat_len < PCXHR_SIZE_MAX_STATUS) max_stat_len = PCXHR_SIZE_MAX_STATUS; else max_stat_len = rmh->stat_len; for (i = 0; i < rmh->stat_len; i++) { /* wait for receiver full */ err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_RXDF, PCXHR_ISR_HI08_RXDF, PCXHR_TIMEOUT_DSP, &reg); if (err) { snd_printk(KERN_ERR "ERROR RMH stat: " "ISR:RXDF=1 (ISR = %x; i=%d )\n", reg, i); return err; } /* read data */ data = PCXHR_INPB(mgr, PCXHR_DSP_TXH) << 16; data |= PCXHR_INPB(mgr, PCXHR_DSP_TXM) << 8; data |= PCXHR_INPB(mgr, PCXHR_DSP_TXL); /* need to update rmh->stat_len on the fly ?? */ if (!i) { if (rmh->dsp_stat != RMH_SSIZE_FIXED) { if (rmh->dsp_stat == RMH_SSIZE_ARG) { rmh->stat_len = (data & 0x0000ff) + 1; data &= 0xffff00; } else { /* rmh->dsp_stat == RMH_SSIZE_MASK */ rmh->stat_len = 1; size_mask = data; while (size_mask) { if (size_mask & 1) rmh->stat_len++; size_mask >>= 1; } } } } #ifdef CONFIG_SND_DEBUG_VERBOSE if (rmh->cmd_idx < CMD_LAST_INDEX) snd_printdd(" stat[%d]=%x\n", i, data); #endif if (i < max_stat_len) rmh->stat[i] = data; } if (rmh->stat_len > max_stat_len) { snd_printdd("PCXHR : rmh->stat_len=%x too big\n", rmh->stat_len); rmh->stat_len = max_stat_len; } return 0; } static int pcxhr_send_msg_nolock(struct pcxhr_mgr *mgr, struct pcxhr_rmh *rmh) { int err; int i; u32 data; unsigned char reg; if (snd_BUG_ON(rmh->cmd_len >= PCXHR_SIZE_MAX_CMD)) return -EINVAL; err = pcxhr_send_it_dsp(mgr, PCXHR_IT_MESSAGE, 1); if (err) { snd_printk(KERN_ERR "pcxhr_send_message : ED_DSP_CRASHED\n"); return err; } /* wait for chk bit */ err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_CHK, PCXHR_ISR_HI08_CHK, PCXHR_TIMEOUT_DSP, &reg); if (err) return err; /* reset irq chk */ err = pcxhr_send_it_dsp(mgr, PCXHR_IT_RESET_CHK, 1); if (err) return err; /* wait for chk bit == 0*/ err = 
pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_CHK, 0, PCXHR_TIMEOUT_DSP, &reg); if (err) return err; data = rmh->cmd[0]; if (rmh->cmd_len > 1) data |= 0x008000; /* MASK_MORE_THAN_1_WORD_COMMAND */ else data &= 0xff7fff; /* MASK_1_WORD_COMMAND */ #ifdef CONFIG_SND_DEBUG_VERBOSE if (rmh->cmd_idx < CMD_LAST_INDEX) snd_printdd("MSG cmd[0]=%x (%s)\n", data, cmd_names[rmh->cmd_idx]); #endif err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_TRDY, PCXHR_ISR_HI08_TRDY, PCXHR_TIMEOUT_DSP, &reg); if (err) return err; PCXHR_OUTPB(mgr, PCXHR_DSP_TXH, (data>>16)&0xFF); PCXHR_OUTPB(mgr, PCXHR_DSP_TXM, (data>>8)&0xFF); PCXHR_OUTPB(mgr, PCXHR_DSP_TXL, (data&0xFF)); if (rmh->cmd_len > 1) { /* send length */ data = rmh->cmd_len - 1; err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_TRDY, PCXHR_ISR_HI08_TRDY, PCXHR_TIMEOUT_DSP, &reg); if (err) return err; PCXHR_OUTPB(mgr, PCXHR_DSP_TXH, (data>>16)&0xFF); PCXHR_OUTPB(mgr, PCXHR_DSP_TXM, (data>>8)&0xFF); PCXHR_OUTPB(mgr, PCXHR_DSP_TXL, (data&0xFF)); for (i=1; i < rmh->cmd_len; i++) { /* send other words */ data = rmh->cmd[i]; #ifdef CONFIG_SND_DEBUG_VERBOSE if (rmh->cmd_idx < CMD_LAST_INDEX) snd_printdd(" cmd[%d]=%x\n", i, data); #endif err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_TRDY, PCXHR_ISR_HI08_TRDY, PCXHR_TIMEOUT_DSP, &reg); if (err) return err; PCXHR_OUTPB(mgr, PCXHR_DSP_TXH, (data>>16)&0xFF); PCXHR_OUTPB(mgr, PCXHR_DSP_TXM, (data>>8)&0xFF); PCXHR_OUTPB(mgr, PCXHR_DSP_TXL, (data&0xFF)); } } /* wait for chk bit */ err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_CHK, PCXHR_ISR_HI08_CHK, PCXHR_TIMEOUT_DSP, &reg); if (err) return err; /* test status ISR */ if (reg & PCXHR_ISR_HI08_ERR) { /* ERROR, wait for receiver full */ err = pcxhr_check_reg_bit(mgr, PCXHR_DSP_ISR, PCXHR_ISR_HI08_RXDF, PCXHR_ISR_HI08_RXDF, PCXHR_TIMEOUT_DSP, &reg); if (err) { snd_printk(KERN_ERR "ERROR RMH: ISR:RXDF=1 (ISR = %x)\n", reg); return err; } /* read error code */ data = PCXHR_INPB(mgr, 
PCXHR_DSP_TXH) << 16; data |= PCXHR_INPB(mgr, PCXHR_DSP_TXM) << 8; data |= PCXHR_INPB(mgr, PCXHR_DSP_TXL); snd_printk(KERN_ERR "ERROR RMH(%d): 0x%x\n", rmh->cmd_idx, data); err = -EINVAL; } else { /* read the response data */ err = pcxhr_read_rmh_status(mgr, rmh); } /* reset semaphore */ if (pcxhr_send_it_dsp(mgr, PCXHR_IT_RESET_SEMAPHORE, 1) < 0) return -EIO; return err; } /** * pcxhr_init_rmh - initialize the RMH instance * @rmh: the rmh pointer to be initialized * @cmd: the rmh command to be set */ void pcxhr_init_rmh(struct pcxhr_rmh *rmh, int cmd) { if (snd_BUG_ON(cmd >= CMD_LAST_INDEX)) return; rmh->cmd[0] = pcxhr_dsp_cmds[cmd].opcode; rmh->cmd_len = 1; rmh->stat_len = pcxhr_dsp_cmds[cmd].st_length; rmh->dsp_stat = pcxhr_dsp_cmds[cmd].st_type; rmh->cmd_idx = cmd; } void pcxhr_set_pipe_cmd_params(struct pcxhr_rmh *rmh, int capture, unsigned int param1, unsigned int param2, unsigned int param3) { snd_BUG_ON(param1 > MASK_FIRST_FIELD); if (capture) rmh->cmd[0] |= 0x800; /* COMMAND_RECORD_MASK */ if (param1) rmh->cmd[0] |= (param1 << FIELD_SIZE); if (param2) { snd_BUG_ON(param2 > MASK_FIRST_FIELD); rmh->cmd[0] |= param2; } if(param3) { snd_BUG_ON(param3 > MASK_DSP_WORD); rmh->cmd[1] = param3; rmh->cmd_len = 2; } } /* * pcxhr_send_msg - send a DSP message with spinlock * @rmh: the rmh record to send and receive * * returns 0 if successful, or a negative error code. 
*/ int pcxhr_send_msg(struct pcxhr_mgr *mgr, struct pcxhr_rmh *rmh) { unsigned long flags; int err; spin_lock_irqsave(&mgr->msg_lock, flags); err = pcxhr_send_msg_nolock(mgr, rmh); spin_unlock_irqrestore(&mgr->msg_lock, flags); return err; } static inline int pcxhr_pipes_running(struct pcxhr_mgr *mgr) { int start_mask = PCXHR_INPL(mgr, PCXHR_PLX_MBOX2); /* least segnificant 12 bits are the pipe states * for the playback audios * next 12 bits are the pipe states for the capture audios * (PCXHR_PIPE_STATE_CAPTURE_OFFSET) */ start_mask &= 0xffffff; snd_printdd("CMD_PIPE_STATE MBOX2=0x%06x\n", start_mask); return start_mask; } #define PCXHR_PIPE_STATE_CAPTURE_OFFSET 12 #define MAX_WAIT_FOR_DSP 20 static int pcxhr_prepair_pipe_start(struct pcxhr_mgr *mgr, int audio_mask, int *retry) { struct pcxhr_rmh rmh; int err; int audio = 0; *retry = 0; while (audio_mask) { if (audio_mask & 1) { pcxhr_init_rmh(&rmh, CMD_CAN_START_PIPE); if (audio < PCXHR_PIPE_STATE_CAPTURE_OFFSET) { /* can start playback pipe */ pcxhr_set_pipe_cmd_params(&rmh, 0, audio, 0, 0); } else { /* can start capture pipe */ pcxhr_set_pipe_cmd_params(&rmh, 1, audio - PCXHR_PIPE_STATE_CAPTURE_OFFSET, 0, 0); } err = pcxhr_send_msg(mgr, &rmh); if (err) { snd_printk(KERN_ERR "error pipe start " "(CMD_CAN_START_PIPE) err=%x!\n", err); return err; } /* if the pipe couldn't be prepaired for start, * retry it later */ if (rmh.stat[0] == 0) *retry |= (1<<audio); } audio_mask>>=1; audio++; } return 0; } static int pcxhr_stop_pipes(struct pcxhr_mgr *mgr, int audio_mask) { struct pcxhr_rmh rmh; int err; int audio = 0; while (audio_mask) { if (audio_mask & 1) { pcxhr_init_rmh(&rmh, CMD_STOP_PIPE); if (audio < PCXHR_PIPE_STATE_CAPTURE_OFFSET) { /* stop playback pipe */ pcxhr_set_pipe_cmd_params(&rmh, 0, audio, 0, 0); } else { /* stop capture pipe */ pcxhr_set_pipe_cmd_params(&rmh, 1, audio - PCXHR_PIPE_STATE_CAPTURE_OFFSET, 0, 0); } err = pcxhr_send_msg(mgr, &rmh); if (err) { snd_printk(KERN_ERR "error pipe stop " 
"(CMD_STOP_PIPE) err=%x!\n", err); return err; } } audio_mask>>=1; audio++; } return 0; } static int pcxhr_toggle_pipes(struct pcxhr_mgr *mgr, int audio_mask) { struct pcxhr_rmh rmh; int err; int audio = 0; while (audio_mask) { if (audio_mask & 1) { pcxhr_init_rmh(&rmh, CMD_CONF_PIPE); if (audio < PCXHR_PIPE_STATE_CAPTURE_OFFSET) pcxhr_set_pipe_cmd_params(&rmh, 0, 0, 0, 1 << audio); else pcxhr_set_pipe_cmd_params(&rmh, 1, 0, 0, 1 << (audio - PCXHR_PIPE_STATE_CAPTURE_OFFSET)); err = pcxhr_send_msg(mgr, &rmh); if (err) { snd_printk(KERN_ERR "error pipe start " "(CMD_CONF_PIPE) err=%x!\n", err); return err; } } audio_mask>>=1; audio++; } /* now fire the interrupt on the card */ pcxhr_init_rmh(&rmh, CMD_SEND_IRQA); err = pcxhr_send_msg(mgr, &rmh); if (err) { snd_printk(KERN_ERR "error pipe start (CMD_SEND_IRQA) err=%x!\n", err); return err; } return 0; } int pcxhr_set_pipe_state(struct pcxhr_mgr *mgr, int playback_mask, int capture_mask, int start) { int state, i, err; int audio_mask; #ifdef CONFIG_SND_DEBUG_VERBOSE struct timeval my_tv1, my_tv2; do_gettimeofday(&my_tv1); #endif audio_mask = (playback_mask | (capture_mask << PCXHR_PIPE_STATE_CAPTURE_OFFSET)); /* current pipe state (playback + record) */ state = pcxhr_pipes_running(mgr); snd_printdd("pcxhr_set_pipe_state %s (mask %x current %x)\n", start ? "START" : "STOP", audio_mask, state); if (start) { /* start only pipes that are not yet started */ audio_mask &= ~state; state = audio_mask; for (i = 0; i < MAX_WAIT_FOR_DSP; i++) { err = pcxhr_prepair_pipe_start(mgr, state, &state); if (err) return err; if (state == 0) break; /* success, all pipes prepaired */ mdelay(1); /* wait 1 millisecond and retry */ } } else { audio_mask &= state; /* stop only pipes that are started */ } if (audio_mask == 0) return 0; err = pcxhr_toggle_pipes(mgr, audio_mask); if (err) return err; i = 0; while (1) { state = pcxhr_pipes_running(mgr); /* have all pipes the new state ? */ if ((state & audio_mask) == (start ? 
audio_mask : 0)) break; if (++i >= MAX_WAIT_FOR_DSP * 100) { snd_printk(KERN_ERR "error pipe start/stop\n"); return -EBUSY; } udelay(10); /* wait 10 microseconds */ } if (!start) { err = pcxhr_stop_pipes(mgr, audio_mask); if (err) return err; } #ifdef CONFIG_SND_DEBUG_VERBOSE do_gettimeofday(&my_tv2); snd_printdd("***SET PIPE STATE*** TIME = %ld (err = %x)\n", (long)(my_tv2.tv_usec - my_tv1.tv_usec), err); #endif return 0; } int pcxhr_write_io_num_reg_cont(struct pcxhr_mgr *mgr, unsigned int mask, unsigned int value, int *changed) { struct pcxhr_rmh rmh; unsigned long flags; int err; spin_lock_irqsave(&mgr->msg_lock, flags); if ((mgr->io_num_reg_cont & mask) == value) { snd_printdd("IO_NUM_REG_CONT mask %x already is set to %x\n", mask, value); if (changed) *changed = 0; spin_unlock_irqrestore(&mgr->msg_lock, flags); return 0; /* already programmed */ } pcxhr_init_rmh(&rmh, CMD_ACCESS_IO_WRITE); rmh.cmd[0] |= IO_NUM_REG_CONT; rmh.cmd[1] = mask; rmh.cmd[2] = value; rmh.cmd_len = 3; err = pcxhr_send_msg_nolock(mgr, &rmh); if (err == 0) { mgr->io_num_reg_cont &= ~mask; mgr->io_num_reg_cont |= value; if (changed) *changed = 1; } spin_unlock_irqrestore(&mgr->msg_lock, flags); return err; } #define PCXHR_IRQ_TIMER 0x000300 #define PCXHR_IRQ_FREQ_CHANGE 0x000800 #define PCXHR_IRQ_TIME_CODE 0x001000 #define PCXHR_IRQ_NOTIFY 0x002000 #define PCXHR_IRQ_ASYNC 0x008000 #define PCXHR_IRQ_MASK 0x00bb00 #define PCXHR_FATAL_DSP_ERR 0xff0000 enum pcxhr_async_err_src { PCXHR_ERR_PIPE, PCXHR_ERR_STREAM, PCXHR_ERR_AUDIO }; static int pcxhr_handle_async_err(struct pcxhr_mgr *mgr, u32 err, enum pcxhr_async_err_src err_src, int pipe, int is_capture) { #ifdef CONFIG_SND_DEBUG_VERBOSE static char* err_src_name[] = { [PCXHR_ERR_PIPE] = "Pipe", [PCXHR_ERR_STREAM] = "Stream", [PCXHR_ERR_AUDIO] = "Audio" }; #endif if (err & 0xfff) err &= 0xfff; else err = ((err >> 12) & 0xfff); if (!err) return 0; snd_printdd("CMD_ASYNC : Error %s %s Pipe %d err=%x\n", err_src_name[err_src], is_capture ? 
"Record" : "Play", pipe, err); if (err == 0xe01) mgr->async_err_stream_xrun++; else if (err == 0xe10) mgr->async_err_pipe_xrun++; else mgr->async_err_other_last = (int)err; return 1; } void pcxhr_msg_tasklet(unsigned long arg) { struct pcxhr_mgr *mgr = (struct pcxhr_mgr *)(arg); struct pcxhr_rmh *prmh = mgr->prmh; int err; int i, j; if (mgr->src_it_dsp & PCXHR_IRQ_FREQ_CHANGE) snd_printdd("TASKLET : PCXHR_IRQ_FREQ_CHANGE event occurred\n"); if (mgr->src_it_dsp & PCXHR_IRQ_TIME_CODE) snd_printdd("TASKLET : PCXHR_IRQ_TIME_CODE event occurred\n"); if (mgr->src_it_dsp & PCXHR_IRQ_NOTIFY) snd_printdd("TASKLET : PCXHR_IRQ_NOTIFY event occurred\n"); if (mgr->src_it_dsp & (PCXHR_IRQ_FREQ_CHANGE | PCXHR_IRQ_TIME_CODE)) { /* clear events FREQ_CHANGE and TIME_CODE */ pcxhr_init_rmh(prmh, CMD_TEST_IT); err = pcxhr_send_msg(mgr, prmh); snd_printdd("CMD_TEST_IT : err=%x, stat=%x\n", err, prmh->stat[0]); } if (mgr->src_it_dsp & PCXHR_IRQ_ASYNC) { snd_printdd("TASKLET : PCXHR_IRQ_ASYNC event occurred\n"); pcxhr_init_rmh(prmh, CMD_ASYNC); prmh->cmd[0] |= 1; /* add SEL_ASYNC_EVENTS */ /* this is the only one extra long response command */ prmh->stat_len = PCXHR_SIZE_MAX_LONG_STATUS; err = pcxhr_send_msg(mgr, prmh); if (err) snd_printk(KERN_ERR "ERROR pcxhr_msg_tasklet=%x;\n", err); i = 1; while (i < prmh->stat_len) { int nb_audio = ((prmh->stat[i] >> FIELD_SIZE) & MASK_FIRST_FIELD); int nb_stream = ((prmh->stat[i] >> (2*FIELD_SIZE)) & MASK_FIRST_FIELD); int pipe = prmh->stat[i] & MASK_FIRST_FIELD; int is_capture = prmh->stat[i] & 0x400000; u32 err2; if (prmh->stat[i] & 0x800000) { /* if BIT_END */ snd_printdd("TASKLET : End%sPipe %d\n", is_capture ? "Record" : "Play", pipe); } i++; err2 = prmh->stat[i] ? prmh->stat[i] : prmh->stat[i+1]; if (err2) pcxhr_handle_async_err(mgr, err2, PCXHR_ERR_PIPE, pipe, is_capture); i += 2; for (j = 0; j < nb_stream; j++) { err2 = prmh->stat[i] ? 
prmh->stat[i] : prmh->stat[i+1]; if (err2) pcxhr_handle_async_err(mgr, err2, PCXHR_ERR_STREAM, pipe, is_capture); i += 2; } for (j = 0; j < nb_audio; j++) { err2 = prmh->stat[i] ? prmh->stat[i] : prmh->stat[i+1]; if (err2) pcxhr_handle_async_err(mgr, err2, PCXHR_ERR_AUDIO, pipe, is_capture); i += 2; } } } } static u_int64_t pcxhr_stream_read_position(struct pcxhr_mgr *mgr, struct pcxhr_stream *stream) { u_int64_t hw_sample_count; struct pcxhr_rmh rmh; int err, stream_mask; stream_mask = stream->pipe->is_capture ? 1 : 1<<stream->substream->number; /* get sample count for one stream */ pcxhr_init_rmh(&rmh, CMD_STREAM_SAMPLE_COUNT); pcxhr_set_pipe_cmd_params(&rmh, stream->pipe->is_capture, stream->pipe->first_audio, 0, stream_mask); /* rmh.stat_len = 2; */ /* 2 resp data for each stream of the pipe */ err = pcxhr_send_msg(mgr, &rmh); if (err) return 0; hw_sample_count = ((u_int64_t)rmh.stat[0]) << 24; hw_sample_count += (u_int64_t)rmh.stat[1]; snd_printdd("stream %c%d : abs samples real(%ld) timer(%ld)\n", stream->pipe->is_capture ? 
'C' : 'P', stream->substream->number, (long unsigned int)hw_sample_count, (long unsigned int)(stream->timer_abs_periods + stream->timer_period_frag + mgr->granularity)); return hw_sample_count; } static void pcxhr_update_timer_pos(struct pcxhr_mgr *mgr, struct pcxhr_stream *stream, int samples_to_add) { if (stream->substream && (stream->status == PCXHR_STREAM_STATUS_RUNNING)) { u_int64_t new_sample_count; int elapsed = 0; int hardware_read = 0; struct snd_pcm_runtime *runtime = stream->substream->runtime; if (samples_to_add < 0) { stream->timer_is_synced = 0; /* add default if no hardware_read possible */ samples_to_add = mgr->granularity; } if (!stream->timer_is_synced) { if ((stream->timer_abs_periods != 0) || ((stream->timer_period_frag + samples_to_add) >= runtime->period_size)) { new_sample_count = pcxhr_stream_read_position(mgr, stream); hardware_read = 1; if (new_sample_count >= mgr->granularity) { /* sub security offset because of * jitter and finer granularity of * dsp time (MBOX4) */ new_sample_count -= mgr->granularity; stream->timer_is_synced = 1; } } } if (!hardware_read) { /* if we didn't try to sync the position, increment it * by PCXHR_GRANULARITY every timer interrupt */ new_sample_count = stream->timer_abs_periods + stream->timer_period_frag + samples_to_add; } while (1) { u_int64_t new_elapse_pos = stream->timer_abs_periods + runtime->period_size; if (new_elapse_pos > new_sample_count) break; elapsed = 1; stream->timer_buf_periods++; if (stream->timer_buf_periods >= runtime->periods) stream->timer_buf_periods = 0; stream->timer_abs_periods = new_elapse_pos; } if (new_sample_count >= stream->timer_abs_periods) { stream->timer_period_frag = (u_int32_t)(new_sample_count - stream->timer_abs_periods); } else { snd_printk(KERN_ERR "ERROR new_sample_count too small ??? 
%ld\n", (long unsigned int)new_sample_count); } if (elapsed) { spin_unlock(&mgr->lock); snd_pcm_period_elapsed(stream->substream); spin_lock(&mgr->lock); } } } irqreturn_t pcxhr_interrupt(int irq, void *dev_id) { struct pcxhr_mgr *mgr = dev_id; unsigned int reg; int i, j; struct snd_pcxhr *chip; spin_lock(&mgr->lock); reg = PCXHR_INPL(mgr, PCXHR_PLX_IRQCS); if (! (reg & PCXHR_IRQCS_ACTIVE_PCIDB)) { spin_unlock(&mgr->lock); /* this device did not cause the interrupt */ return IRQ_NONE; } /* clear interrupt */ reg = PCXHR_INPL(mgr, PCXHR_PLX_L2PCIDB); PCXHR_OUTPL(mgr, PCXHR_PLX_L2PCIDB, reg); /* timer irq occurred */ if (reg & PCXHR_IRQ_TIMER) { int timer_toggle = reg & PCXHR_IRQ_TIMER; /* is a 24 bit counter */ int dsp_time_new = PCXHR_INPL(mgr, PCXHR_PLX_MBOX4) & PCXHR_DSP_TIME_MASK; int dsp_time_diff = dsp_time_new - mgr->dsp_time_last; if ((dsp_time_diff < 0) && (mgr->dsp_time_last != PCXHR_DSP_TIME_INVALID)) { snd_printdd("ERROR DSP TIME old(%d) new(%d) -> " "resynchronize all streams\n", mgr->dsp_time_last, dsp_time_new); mgr->dsp_time_err++; } #ifdef CONFIG_SND_DEBUG_VERBOSE if (dsp_time_diff == 0) snd_printdd("ERROR DSP TIME NO DIFF time(%d)\n", dsp_time_new); else if (dsp_time_diff >= (2*mgr->granularity)) snd_printdd("ERROR DSP TIME TOO BIG old(%d) add(%d)\n", mgr->dsp_time_last, dsp_time_new - mgr->dsp_time_last); else if (dsp_time_diff % mgr->granularity) snd_printdd("ERROR DSP TIME increased by %d\n", dsp_time_diff); #endif mgr->dsp_time_last = dsp_time_new; if (timer_toggle == mgr->timer_toggle) { snd_printdd("ERROR TIMER TOGGLE\n"); mgr->dsp_time_err++; } mgr->timer_toggle = timer_toggle; reg &= ~PCXHR_IRQ_TIMER; for (i = 0; i < mgr->num_cards; i++) { chip = mgr->chip[i]; for (j = 0; j < chip->nb_streams_capt; j++) pcxhr_update_timer_pos(mgr, &chip->capture_stream[j], dsp_time_diff); } for (i = 0; i < mgr->num_cards; i++) { chip = mgr->chip[i]; for (j = 0; j < chip->nb_streams_play; j++) pcxhr_update_timer_pos(mgr, &chip->playback_stream[j], 
dsp_time_diff); } } /* other irq's handled in the tasklet */ if (reg & PCXHR_IRQ_MASK) { if (reg & PCXHR_IRQ_ASYNC) { /* as we didn't request any async notifications, * some kind of xrun error will probably occurred */ /* better resynchronize all streams next interrupt : */ mgr->dsp_time_last = PCXHR_DSP_TIME_INVALID; } mgr->src_it_dsp = reg; tasklet_schedule(&mgr->msg_taskq); } #ifdef CONFIG_SND_DEBUG_VERBOSE if (reg & PCXHR_FATAL_DSP_ERR) snd_printdd("FATAL DSP ERROR : %x\n", reg); #endif spin_unlock(&mgr->lock); return IRQ_HANDLED; /* this device caused the interrupt */ }
gpl-2.0
shumashv1/hp-kernel-tenderloin
arch/s390/kernel/ebcdic.c
9314
17898
/* * arch/s390/kernel/ebcdic.c * ECBDIC -> ASCII, ASCII -> ECBDIC, * upper to lower case (EBCDIC) conversion tables. * * S390 version * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Martin Peschke <peschke@fh-brandenburg.de> */ #include <linux/module.h> #include <asm/types.h> #include <asm/ebcdic.h> /* * ASCII (IBM PC 437) -> EBCDIC 037 */ __u8 _ascebc[256] = { /*00 NUL SOH STX ETX EOT ENQ ACK BEL */ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, /*08 BS HT LF VT FF CR SO SI */ /* ->NL */ 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, /*18 CAN EM SUB ESC FS GS RS US */ /* ->IGS ->IRS ->IUS */ 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, /*20 SP ! " # $ % & ' */ 0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D, /*28 ( ) * + , - . / */ 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, /*30 0 1 2 3 4 5 6 7 */ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, /*38 8 9 : ; < = > ? 
*/ 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, /*40 @ A B C D E F G */ 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, /*48 H I J K L M N O */ 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, /*50 P Q R S T U V W */ 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, /*58 X Y Z [ \ ] ^ _ */ 0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D, /*60 ` a b c d e f g */ 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /*68 h i j k l m n o */ 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, /*70 p q r s t u v w */ 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, /*78 x y z { | } ~ DL */ 0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07, /*80*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*88*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*90*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*98*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*A0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*A8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*B0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*B8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*C0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*C8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*D0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*D8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*E0 sz */ 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*E8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*F0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*F8*/ 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF }; /* * EBCDIC 037 -> ASCII (IBM PC 437) */ __u8 _ebcasc[256] = { /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */ 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F, /* 0x08 -GE -SPS -RPT VT FF CR SO SI */ 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC -ENP ->LF */ 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07, /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB -IUS */ 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, /* 0x20 -DS -SOS FS -WUS -BYP LF ETB 
ESC -INP */ 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B, /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL -SW */ 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07, /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */ 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04, /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */ 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A, /* 0x40 SP RSP ä ---- */ 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86, /* 0x48 . < ( + | */ 0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C, /* 0x50 & ---- */ 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07, /* 0x58 ß ! $ * ) ; */ 0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA, /* 0x60 - / ---- Ä ---- ---- ---- */ 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F, /* 0x68 ---- , % _ > ? */ 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, /* 0x70 ---- ---- ---- ---- ---- ---- ---- */ 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, /* 0x78 * ` : # @ ' = " */ 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, /* 0x80 * a b c d e f g */ 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x88 h i ---- ---- ---- */ 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1, /* 0x90 ° j k l m n o p */ 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, /* 0x98 q r ---- ---- */ 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07, /* 0xA0 ~ s t u v w x */ 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, /* 0xA8 y z ---- ---- ---- ---- */ 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07, /* 0xB0 ^ ---- § ---- */ 0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC, /* 0xB8 ---- [ ] ---- ---- ---- ---- */ 0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07, /* 0xC0 { A B C D E F G */ 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0xC8 H I ---- ö ---- */ 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07, /* 0xD0 } J K L M N O P */ 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, /* 0xD8 Q R ---- ü */ 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98, /* 0xE0 \ S T U V W X */ 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, /* 0xE8 Y Z ---- Ö ---- ---- ---- */ 0x59, 0x5A, 
0xFD, 0x07, 0x99, 0x07, 0x07, 0x07, /* 0xF0 0 1 2 3 4 5 6 7 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */ 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07 }; /* * ASCII (IBM PC 437) -> EBCDIC 500 */ __u8 _ascebc_500[256] = { /*00 NUL SOH STX ETX EOT ENQ ACK BEL */ 0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F, /*08 BS HT LF VT FF CR SO SI */ /* ->NL */ 0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */ 0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26, /*18 CAN EM SUB ESC FS GS RS US */ /* ->IGS ->IRS ->IUS */ 0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F, /*20 SP ! " # $ % & ' */ 0x40, 0x4F, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D, /*28 ( ) * + , - . / */ 0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61, /*30 0 1 2 3 4 5 6 7 */ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, /*38 8 9 : ; < = > ? */ 0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F, /*40 @ A B C D E F G */ 0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, /*48 H I J K L M N O */ 0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, /*50 P Q R S T U V W */ 0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, /*58 X Y Z [ \ ] ^ _ */ 0xE7, 0xE8, 0xE9, 0x4A, 0xE0, 0x5A, 0x5F, 0x6D, /*60 ` a b c d e f g */ 0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /*68 h i j k l m n o */ 0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, /*70 p q r s t u v w */ 0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, /*78 x y z { | } ~ DL */ 0xA7, 0xA8, 0xA9, 0xC0, 0xBB, 0xD0, 0xA1, 0x07, /*80*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*88*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*90*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*98*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*A0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*A8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*B0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*B8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*C0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 
0x3F, /*C8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*D0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*D8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*E0 sz */ 0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*E8*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*F0*/ 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, /*F8*/ 0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF }; /* * EBCDIC 500 -> ASCII (IBM PC 437) */ __u8 _ebcasc_500[256] = { /* 0x00 NUL SOH STX ETX *SEL HT *RNL DEL */ 0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F, /* 0x08 -GE -SPS -RPT VT FF CR SO SI */ 0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, /* 0x10 DLE DC1 DC2 DC3 -RES -NL BS -POC -ENP ->LF */ 0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07, /* 0x18 CAN EM -UBS -CU1 -IFS -IGS -IRS -ITB -IUS */ 0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, /* 0x20 -DS -SOS FS -WUS -BYP LF ETB ESC -INP */ 0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B, /* 0x28 -SA -SFE -SM -CSP -MFA ENQ ACK BEL -SW */ 0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07, /* 0x30 ---- ---- SYN -IR -PP -TRN -NBS EOT */ 0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04, /* 0x38 -SBS -IT -RFF -CU3 DC4 NAK ---- SUB */ 0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A, /* 0x40 SP RSP ä ---- */ 0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86, /* 0x48 [ . < ( + ! */ 0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21, /* 0x50 & ---- */ 0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07, /* 0x58 ß ] $ * ) ; ^ */ 0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E, /* 0x60 - / ---- Ä ---- ---- ---- */ 0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F, /* 0x68 ---- , % _ > ? 
*/ 0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F, /* 0x70 ---- ---- ---- ---- ---- ---- ---- */ 0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, /* 0x78 * ` : # @ ' = " */ 0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22, /* 0x80 * a b c d e f g */ 0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x88 h i ---- ---- ---- */ 0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1, /* 0x90 ° j k l m n o p */ 0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, /* 0x98 q r ---- ---- */ 0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07, /* 0xA0 ~ s t u v w x */ 0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, /* 0xA8 y z ---- ---- ---- ---- */ 0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07, /* 0xB0 ---- § ---- */ 0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC, /* 0xB8 ---- | ---- ---- ---- ---- */ 0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07, /* 0xC0 { A B C D E F G */ 0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0xC8 H I ---- ö ---- */ 0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07, /* 0xD0 } J K L M N O P */ 0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, /* 0xD8 Q R ---- ü */ 0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98, /* 0xE0 \ S T U V W X */ 0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, /* 0xE8 Y Z ---- Ö ---- ---- ---- */ 0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07, /* 0xF0 0 1 2 3 4 5 6 7 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0xF8 8 9 ---- ---- Ü ---- ---- ---- */ 0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07 }; /* * EBCDIC 037/500 conversion table: * from upper to lower case */ __u8 _ebc_tolower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 
0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9C, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xAB, 0x8C, 0x8D, 0x8E, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, 0xC0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF, 0xE0, 0xE1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xEA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xDB, 0xDC, 0xDD, 0xDE, 0xFF }; /* * EBCDIC 037/500 conversion table: * from lower to upper case */ __u8 _ebc_toupper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 
0xC7, 0xC8, 0xC9, 0x8A, 0x8B, 0xAC, 0xAD, 0xAE, 0x8F, 0x90, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0x9A, 0x9B, 0x9E, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF }; EXPORT_SYMBOL(_ascebc_500); EXPORT_SYMBOL(_ebcasc_500); EXPORT_SYMBOL(_ascebc); EXPORT_SYMBOL(_ebcasc); EXPORT_SYMBOL(_ebc_tolower); EXPORT_SYMBOL(_ebc_toupper);
gpl-2.0
stefanbucur/linux-s2e
drivers/infiniband/hw/ipath/ipath_uc.c
13922
14459
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "ipath_verbs.h" #include "ipath_kernel.h" /* cut down ridiculously long IB macro names */ #define OP(x) IB_OPCODE_UC_##x /** * ipath_make_uc_req - construct a request packet (SEND, RDMA write) * @qp: a pointer to the QP * * Return 1 if constructed; otherwise, return 0. 
*/ int ipath_make_uc_req(struct ipath_qp *qp) { struct ipath_other_headers *ohdr; struct ipath_swqe *wqe; unsigned long flags; u32 hwords; u32 bth0; u32 len; u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); int ret = 0; spin_lock_irqsave(&qp->s_lock, flags); if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) { if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ if (qp->s_last == qp->s_head) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&qp->s_dma_busy)) { qp->s_flags |= IPATH_S_WAIT_DMA; goto bail; } wqe = get_swqe_ptr(qp, qp->s_last); ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); goto done; } ohdr = &qp->s_hdr.u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ohdr = &qp->s_hdr.u.l.oth; /* header size in 32-bit words LRH+BTH = (8+12)/4. */ hwords = 5; bth0 = 1 << 22; /* Set M bit */ /* Get the next send request. */ wqe = get_swqe_ptr(qp, qp->s_cur); qp->s_wqe = NULL; switch (qp->s_state) { default: if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) goto bail; /* Check if send work queue is empty. */ if (qp->s_cur == qp->s_head) goto bail; /* * Start a new request. 
*/ qp->s_psn = wqe->psn = qp->s_next_psn; qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; qp->s_len = len = wqe->length; switch (wqe->wr.opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: if (len > pmtu) { qp->s_state = OP(SEND_FIRST); len = pmtu; break; } if (wqe->wr.opcode == IB_WR_SEND) qp->s_state = OP(SEND_ONLY); else { qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= 1 << 23; qp->s_wqe = wqe; if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: ohdr->u.rc.reth.vaddr = cpu_to_be64(wqe->wr.wr.rdma.remote_addr); ohdr->u.rc.reth.rkey = cpu_to_be32(wqe->wr.wr.rdma.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); hwords += sizeof(struct ib_reth) / 4; if (len > pmtu) { qp->s_state = OP(RDMA_WRITE_FIRST); len = pmtu; break; } if (wqe->wr.opcode == IB_WR_RDMA_WRITE) qp->s_state = OP(RDMA_WRITE_ONLY); else { qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the RETH */ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; hwords += 1; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= 1 << 23; } qp->s_wqe = wqe; if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; break; default: goto bail; } break; case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); /* FALLTHROUGH */ case OP(SEND_MIDDLE): len = qp->s_len; if (len > pmtu) { len = pmtu; break; } if (wqe->wr.opcode == IB_WR_SEND) qp->s_state = OP(SEND_LAST); else { qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= 1 << 23; qp->s_wqe = wqe; if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; break; case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); /* FALLTHROUGH */ case OP(RDMA_WRITE_MIDDLE): len = 
qp->s_len; if (len > pmtu) { len = pmtu; break; } if (wqe->wr.opcode == IB_WR_RDMA_WRITE) qp->s_state = OP(RDMA_WRITE_LAST); else { qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= 1 << 23; } qp->s_wqe = wqe; if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; break; } qp->s_len -= len; qp->s_hdrwords = hwords; qp->s_cur_sge = &qp->s_sge; qp->s_cur_size = len; ipath_make_ruc_header(to_idev(qp->ibqp.device), qp, ohdr, bth0 | (qp->s_state << 24), qp->s_next_psn++ & IPATH_PSN_MASK); done: ret = 1; goto unlock; bail: qp->s_flags &= ~IPATH_S_BUSY; unlock: spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } /** * ipath_uc_rcv - handle an incoming UC packet * @dev: the device the packet came in on * @hdr: the header of the packet * @has_grh: true if the packet has a GRH * @data: the packet data * @tlen: the length of the packet * @qp: the QP for this packet. * * This is called from ipath_qp_rcv() to process an incoming UC packet * for the given QP. * Called at interrupt level. */ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) { struct ipath_other_headers *ohdr; int opcode; u32 hdrsize; u32 psn; u32 pad; struct ib_wc wc; u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); struct ib_reth *reth; int header_in_data; /* Validate the SLID. See Ch. 9.6.1.5 */ if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid)) goto done; /* Check for GRH */ if (!has_grh) { ohdr = &hdr->u.oth; hdrsize = 8 + 12; /* LRH + BTH */ psn = be32_to_cpu(ohdr->bth[2]); header_in_data = 0; } else { ohdr = &hdr->u.l.oth; hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */ /* * The header with GRH is 60 bytes and the * core driver sets the eager header buffer * size to 56 bytes so the last 4 bytes of * the BTH header (PSN) is in the data buffer. 
*/ header_in_data = dev->dd->ipath_rcvhdrentsize == 16; if (header_in_data) { psn = be32_to_cpu(((__be32 *) data)[0]); data += sizeof(__be32); } else psn = be32_to_cpu(ohdr->bth[2]); } /* * The opcode is in the low byte when its in network order * (top byte when in host order). */ opcode = be32_to_cpu(ohdr->bth[0]) >> 24; memset(&wc, 0, sizeof wc); /* Compare the PSN verses the expected PSN. */ if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) { /* * Handle a sequence error. * Silently drop any current message. */ qp->r_psn = psn; inv: qp->r_state = OP(SEND_LAST); switch (opcode) { case OP(SEND_FIRST): case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): goto send_first; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_ONLY): case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): goto rdma_first; default: dev->n_pkt_drops++; goto done; } } /* Check for opcode sequence errors. */ switch (qp->r_state) { case OP(SEND_FIRST): case OP(SEND_MIDDLE): if (opcode == OP(SEND_MIDDLE) || opcode == OP(SEND_LAST) || opcode == OP(SEND_LAST_WITH_IMMEDIATE)) break; goto inv; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_MIDDLE): if (opcode == OP(RDMA_WRITE_MIDDLE) || opcode == OP(RDMA_WRITE_LAST) || opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) break; goto inv; default: if (opcode == OP(SEND_FIRST) || opcode == OP(SEND_ONLY) || opcode == OP(SEND_ONLY_WITH_IMMEDIATE) || opcode == OP(RDMA_WRITE_FIRST) || opcode == OP(RDMA_WRITE_ONLY) || opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) break; goto inv; } /* OK, process the packet. */ switch (opcode) { case OP(SEND_FIRST): case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): send_first: if (qp->r_flags & IPATH_R_REUSE_SGE) { qp->r_flags &= ~IPATH_R_REUSE_SGE; qp->r_sge = qp->s_rdma_read_sge; } else if (!ipath_get_rwqe(qp, 0)) { dev->n_pkt_drops++; goto done; } /* Save the WQE so we can reuse it in case of an error. 
*/ qp->s_rdma_read_sge = qp->r_sge; qp->r_rcv_len = 0; if (opcode == OP(SEND_ONLY)) goto send_last; else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE)) goto send_last_imm; /* FALLTHROUGH */ case OP(SEND_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ if (unlikely(tlen != (hdrsize + pmtu + 4))) { qp->r_flags |= IPATH_R_REUSE_SGE; dev->n_pkt_drops++; goto done; } qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) { qp->r_flags |= IPATH_R_REUSE_SGE; dev->n_pkt_drops++; goto done; } ipath_copy_sge(&qp->r_sge, data, pmtu); break; case OP(SEND_LAST_WITH_IMMEDIATE): send_last_imm: if (header_in_data) { wc.ex.imm_data = *(__be32 *) data; data += sizeof(__be32); } else { /* Immediate data comes after BTH */ wc.ex.imm_data = ohdr->u.imm_data; } hdrsize += 4; wc.wc_flags = IB_WC_WITH_IMM; /* FALLTHROUGH */ case OP(SEND_LAST): send_last: /* Get the number of bytes the message was padded by. */ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; /* Check for invalid length. */ /* XXX LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) { qp->r_flags |= IPATH_R_REUSE_SGE; dev->n_pkt_drops++; goto done; } /* Don't count the CRC. */ tlen -= (hdrsize + pad + 4); wc.byte_len = tlen + qp->r_rcv_len; if (unlikely(wc.byte_len > qp->r_len)) { qp->r_flags |= IPATH_R_REUSE_SGE; dev->n_pkt_drops++; goto done; } wc.opcode = IB_WC_RECV; last_imm: ipath_copy_sge(&qp->r_sge, data, tlen); wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.qp = &qp->ibqp; wc.src_qp = qp->remote_qpn; wc.slid = qp->remote_ah_attr.dlid; wc.sl = qp->remote_ah_attr.sl; /* Signal completion event if the solicited bit is set. 
*/ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(1 << 23)) != 0); break; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_ONLY): case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */ rdma_first: /* RETH comes after BTH */ if (!header_in_data) reth = &ohdr->u.rc.reth; else { reth = (struct ib_reth *)data; data += sizeof(*reth); } hdrsize += sizeof(*reth); qp->r_len = be32_to_cpu(reth->length); qp->r_rcv_len = 0; if (qp->r_len != 0) { u32 rkey = be32_to_cpu(reth->rkey); u64 vaddr = be64_to_cpu(reth->vaddr); int ok; /* Check rkey */ ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) { dev->n_pkt_drops++; goto done; } } else { qp->r_sge.sg_list = NULL; qp->r_sge.sge.mr = NULL; qp->r_sge.sge.vaddr = NULL; qp->r_sge.sge.length = 0; qp->r_sge.sge.sge_length = 0; } if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) { dev->n_pkt_drops++; goto done; } if (opcode == OP(RDMA_WRITE_ONLY)) goto rdma_last; else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) goto rdma_last_imm; /* FALLTHROUGH */ case OP(RDMA_WRITE_MIDDLE): /* Check for invalid length PMTU or posted rwqe len. */ if (unlikely(tlen != (hdrsize + pmtu + 4))) { dev->n_pkt_drops++; goto done; } qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) { dev->n_pkt_drops++; goto done; } ipath_copy_sge(&qp->r_sge, data, pmtu); break; case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): rdma_last_imm: if (header_in_data) { wc.ex.imm_data = *(__be32 *) data; data += sizeof(__be32); } else { /* Immediate data comes after BTH */ wc.ex.imm_data = ohdr->u.imm_data; } hdrsize += 4; wc.wc_flags = IB_WC_WITH_IMM; /* Get the number of bytes the message was padded by. */ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; /* Check for invalid length. */ /* XXX LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) { dev->n_pkt_drops++; goto done; } /* Don't count the CRC. 
*/ tlen -= (hdrsize + pad + 4); if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) { dev->n_pkt_drops++; goto done; } if (qp->r_flags & IPATH_R_REUSE_SGE) qp->r_flags &= ~IPATH_R_REUSE_SGE; else if (!ipath_get_rwqe(qp, 1)) { dev->n_pkt_drops++; goto done; } wc.byte_len = qp->r_len; wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; goto last_imm; case OP(RDMA_WRITE_LAST): rdma_last: /* Get the number of bytes the message was padded by. */ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; /* Check for invalid length. */ /* XXX LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) { dev->n_pkt_drops++; goto done; } /* Don't count the CRC. */ tlen -= (hdrsize + pad + 4); if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) { dev->n_pkt_drops++; goto done; } ipath_copy_sge(&qp->r_sge, data, tlen); break; default: /* Drop packet for unknown opcodes. */ dev->n_pkt_drops++; goto done; } qp->r_psn++; qp->r_state = opcode; done: return; }
gpl-2.0
Fusion-Devices/android_kernel_cyanogen_msm8916
arch/parisc/math-emu/denormal.c
14178
3335
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/fp/denormal.c $ Revision: $ * * Purpose: * <<please update with a synopsis of the functionality provided by this file>> * * External Interfaces: * <<the following list was autogenerated, please review>> * dbl_denormalize(dbl_opndp1,dbl_opndp2,inexactflag,rmode) * sgl_denormalize(sgl_opnd,inexactflag,rmode) * * Internal Interfaces: * <<please update>> * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" #include "dbl_float.h" #include "hppa.h" #include <linux/kernel.h> /* #include <machine/sys/mdep_private.h> */ #undef Fpustatus_register #define Fpustatus_register Fpu_register[0] void sgl_denormalize(unsigned int *sgl_opnd, boolean *inexactflag, int rmode) { unsigned int opnd; int sign, exponent; boolean guardbit = FALSE, stickybit, inexact; opnd = *sgl_opnd; stickybit = *inexactflag; exponent = Sgl_exponent(opnd) - SGL_WRAP; sign = Sgl_sign(opnd); Sgl_denormalize(opnd,exponent,guardbit,stickybit,inexact); if (inexact) { switch (rmode) { case ROUNDPLUS: if (sign == 0) { Sgl_increment(opnd); } break; case ROUNDMINUS: if 
(sign != 0) { Sgl_increment(opnd); } break; case ROUNDNEAREST: if (guardbit && (stickybit || Sgl_isone_lowmantissa(opnd))) { Sgl_increment(opnd); } break; } } Sgl_set_sign(opnd,sign); *sgl_opnd = opnd; *inexactflag = inexact; return; } void dbl_denormalize(unsigned int *dbl_opndp1, unsigned int * dbl_opndp2, boolean *inexactflag, int rmode) { unsigned int opndp1, opndp2; int sign, exponent; boolean guardbit = FALSE, stickybit, inexact; opndp1 = *dbl_opndp1; opndp2 = *dbl_opndp2; stickybit = *inexactflag; exponent = Dbl_exponent(opndp1) - DBL_WRAP; sign = Dbl_sign(opndp1); Dbl_denormalize(opndp1,opndp2,exponent,guardbit,stickybit,inexact); if (inexact) { switch (rmode) { case ROUNDPLUS: if (sign == 0) { Dbl_increment(opndp1,opndp2); } break; case ROUNDMINUS: if (sign != 0) { Dbl_increment(opndp1,opndp2); } break; case ROUNDNEAREST: if (guardbit && (stickybit || Dbl_isone_lowmantissap2(opndp2))) { Dbl_increment(opndp1,opndp2); } break; } } Dbl_set_sign(opndp1,sign); *dbl_opndp1 = opndp1; *dbl_opndp2 = opndp2; *inexactflag = inexact; return; }
gpl-2.0
sj8023ld/xperia_msm7x30_kernel
sound/oss/hex2hex.c
14434
1966
/* * hex2hex reads stdin in Intel HEX format and produces an * (unsigned char) array which contains the bytes and writes it * to stdout using C syntax */ #include <stdio.h> #include <string.h> #include <stdlib.h> #define ABANDON(why) { fprintf(stderr, "%s\n", why); exit(1); } #define MAX_SIZE (256*1024) unsigned char buf[MAX_SIZE]; static int loadhex(FILE *inf, unsigned char *buf) { int l=0, c, i; while ((c=getc(inf))!=EOF) { if (c == ':') /* Sync with beginning of line */ { int n, check; unsigned char sum; int addr; int linetype; if (fscanf(inf, "%02x", &n) != 1) ABANDON("File format error"); sum = n; if (fscanf(inf, "%04x", &addr) != 1) ABANDON("File format error"); sum += addr/256; sum += addr%256; if (fscanf(inf, "%02x", &linetype) != 1) ABANDON("File format error"); sum += linetype; if (linetype != 0) continue; for (i=0;i<n;i++) { if (fscanf(inf, "%02x", &c) != 1) ABANDON("File format error"); if (addr >= MAX_SIZE) ABANDON("File too large"); buf[addr++] = c; if (addr > l) l = addr; sum += c; } if (fscanf(inf, "%02x", &check) != 1) ABANDON("File format error"); sum = ~sum + 1; if (check != sum) ABANDON("Line checksum error"); } } return l; } int main( int argc, const char * argv [] ) { const char * varline; int i,l; int id=0; if(argv[1] && strcmp(argv[1], "-i")==0) { argv++; argc--; id=1; } if(argv[1]==NULL) { fprintf(stderr,"hex2hex: [-i] filename\n"); exit(1); } varline = argv[1]; l = loadhex(stdin, buf); printf("/*\n *\t Computer generated file. Do not edit.\n */\n"); printf("static int %s_len = %d;\n", varline, l); printf("static unsigned char %s[] %s = {\n", varline, id?"__initdata":""); for (i=0;i<l;i++) { if (i) printf(","); if (i && !(i % 16)) printf("\n"); printf("0x%02x", buf[i]); } printf("\n};\n\n"); return 0; }
gpl-2.0
milaq/linux-hpc
arch/ppc/boot/of1275/call_prom.c
99
1488
/* * Copyright (C) 1996-2005 Paul Mackerras. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include "of1275.h" #include <stdarg.h> int call_prom(const char *service, int nargs, int nret, ...) { int i; struct prom_args { const char *service; int nargs; int nret; unsigned int args[12]; } args; va_list list; args.service = service; args.nargs = nargs; args.nret = nret; va_start(list, nret); for (i = 0; i < nargs; i++) args.args[i] = va_arg(list, unsigned int); va_end(list); for (i = 0; i < nret; i++) args.args[nargs+i] = 0; if (of_prom_entry(&args) < 0) return -1; return (nret > 0)? args.args[nargs]: 0; } int call_prom_ret(const char *service, int nargs, int nret, unsigned int *rets, ...) { int i; struct prom_args { const char *service; int nargs; int nret; unsigned int args[12]; } args; va_list list; args.service = service; args.nargs = nargs; args.nret = nret; va_start(list, rets); for (i = 0; i < nargs; i++) args.args[i] = va_arg(list, unsigned int); va_end(list); for (i = 0; i < nret; i++) args.args[nargs+i] = 0; if (of_prom_entry(&args) < 0) return -1; if (rets != (void *) 0) for (i = 1; i < nret; ++i) rets[i-1] = args.args[nargs+i]; return (nret > 0)? args.args[nargs]: 0; }
gpl-2.0
smx-smx/dsl-n55u-bender
release/src/router/samba3/testsuite/libsmbclient/src/write/write_2.c
99
1359
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <libsmbclient.h> #define MAX_BUFF_SIZE 255 char g_workgroup[MAX_BUFF_SIZE]; char g_username[MAX_BUFF_SIZE]; char g_password[MAX_BUFF_SIZE]; char g_server[MAX_BUFF_SIZE]; char g_share[MAX_BUFF_SIZE]; void auth_fn(const char *server, const char *share, char *workgroup, int wgmaxlen, char *username, int unmaxlen, char *password, int pwmaxlen) { strncpy(workgroup, g_workgroup, wgmaxlen - 1); strncpy(username, g_username, unmaxlen - 1); strncpy(password, g_password, pwmaxlen - 1); strcpy(g_server, server); strcpy(g_share, share); } int main(int argc, char** argv) { int err = -1; int fd = 0; int msg_len = 0; char url[MAX_BUFF_SIZE]; char* message; bzero(g_workgroup,MAX_BUFF_SIZE); bzero(url,MAX_BUFF_SIZE); if ( argc == 6 ) { strncpy(g_workgroup,argv[1],strlen(argv[1])); strncpy(g_username,argv[2],strlen(argv[2])); strncpy(g_password,argv[3],strlen(argv[3])); strncpy(url,argv[4],strlen(argv[4])); msg_len = strlen(argv[5])+1; message = malloc(msg_len); message[msg_len - 1] = 0; strncpy(message,argv[5],msg_len); smbc_init(auth_fn, 0); smbc_unlink(url); fd = smbc_open(url,O_RDWR | O_CREAT, 0666); smbc_write(fd, message, msg_len); err = errno; smbc_close(fd); free(message); } return err; }
gpl-2.0
bedwa/P6800-Kernel
arch/arm/mach-mmp/mmp2.c
99
5324
/* * linux/arch/arm/mach-mmp/mmp2.c * * code name MMP2 * * Copyright (C) 2009 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/hardware/cache-tauros2.h> #include <asm/mach/time.h> #include <mach/addr-map.h> #include <mach/regs-apbc.h> #include <mach/regs-apmu.h> #include <mach/cputype.h> #include <mach/irqs.h> #include <mach/dma.h> #include <mach/mfp.h> #include <mach/gpio.h> #include <mach/devices.h> #include <mach/mmp2.h> #include "common.h" #include "clock.h" #define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000) #define APMASK(i) (GPIO_REGS_VIRT + BANK_OFF(i) + 0x9c) static struct mfp_addr_map mmp2_addr_map[] __initdata = { MFP_ADDR_X(GPIO0, GPIO58, 0x54), MFP_ADDR_X(GPIO59, GPIO73, 0x280), MFP_ADDR_X(GPIO74, GPIO101, 0x170), MFP_ADDR(GPIO102, 0x0), MFP_ADDR(GPIO103, 0x4), MFP_ADDR(GPIO104, 0x1fc), MFP_ADDR(GPIO105, 0x1f8), MFP_ADDR(GPIO106, 0x1f4), MFP_ADDR(GPIO107, 0x1f0), MFP_ADDR(GPIO108, 0x21c), MFP_ADDR(GPIO109, 0x218), MFP_ADDR(GPIO110, 0x214), MFP_ADDR(GPIO111, 0x200), MFP_ADDR(GPIO112, 0x244), MFP_ADDR(GPIO113, 0x25c), MFP_ADDR(GPIO114, 0x164), MFP_ADDR_X(GPIO115, GPIO122, 0x260), MFP_ADDR(GPIO123, 0x148), MFP_ADDR_X(GPIO124, GPIO141, 0xc), MFP_ADDR(GPIO142, 0x8), MFP_ADDR_X(GPIO143, GPIO151, 0x220), MFP_ADDR_X(GPIO152, GPIO153, 0x248), MFP_ADDR_X(GPIO154, GPIO155, 0x254), MFP_ADDR_X(GPIO156, GPIO159, 0x14c), MFP_ADDR(GPIO160, 0x250), MFP_ADDR(GPIO161, 0x210), MFP_ADDR(GPIO162, 0x20c), MFP_ADDR(GPIO163, 0x208), MFP_ADDR(GPIO164, 0x204), MFP_ADDR(GPIO165, 0x1ec), MFP_ADDR(GPIO166, 0x1e8), MFP_ADDR(GPIO167, 0x1e4), MFP_ADDR(GPIO168, 0x1e0), MFP_ADDR_X(TWSI1_SCL, TWSI1_SDA, 0x140), MFP_ADDR_X(TWSI4_SCL, TWSI4_SDA, 0x2bc), MFP_ADDR(PMIC_INT, 0x2c4), MFP_ADDR(CLK_REQ, 0x160), MFP_ADDR_END, 
}; void mmp2_clear_pmic_int(void) { unsigned long mfpr_pmic, data; mfpr_pmic = APB_VIRT_BASE + 0x1e000 + 0x2c4; data = __raw_readl(mfpr_pmic); __raw_writel(data | (1 << 6), mfpr_pmic); __raw_writel(data, mfpr_pmic); } static void __init mmp2_init_gpio(void) { int i; /* enable GPIO clock */ __raw_writel(APBC_APBCLK | APBC_FNCLK, APBC_MMP2_GPIO); /* unmask GPIO edge detection for all 6 banks -- APMASKx */ for (i = 0; i < 6; i++) __raw_writel(0xffffffff, APMASK(i)); pxa_init_gpio(IRQ_MMP2_GPIO, 0, 167, NULL); } void __init mmp2_init_irq(void) { mmp2_init_icu(); mmp2_init_gpio(); } /* APB peripheral clocks */ static APBC_CLK(uart1, MMP2_UART1, 1, 26000000); static APBC_CLK(uart2, MMP2_UART2, 1, 26000000); static APBC_CLK(uart3, MMP2_UART3, 1, 26000000); static APBC_CLK(uart4, MMP2_UART4, 1, 26000000); static APBC_CLK(twsi1, MMP2_TWSI1, 0, 26000000); static APBC_CLK(twsi2, MMP2_TWSI2, 0, 26000000); static APBC_CLK(twsi3, MMP2_TWSI3, 0, 26000000); static APBC_CLK(twsi4, MMP2_TWSI4, 0, 26000000); static APBC_CLK(twsi5, MMP2_TWSI5, 0, 26000000); static APBC_CLK(twsi6, MMP2_TWSI6, 0, 26000000); static APBC_CLK(rtc, MMP2_RTC, 0, 32768); static APMU_CLK(nand, NAND, 0xbf, 100000000); static struct clk_lookup mmp2_clkregs[] = { INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL), INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL), INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL), INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL), INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL), INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL), INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL), INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL), INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL), INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL), INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), }; static int __init mmp2_init(void) { if (cpu_is_mmp2()) { #ifdef CONFIG_CACHE_TAUROS2 tauros2_init(); #endif mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(mmp2_addr_map); pxa_init_dma(IRQ_MMP2_DMA_RIQ, 16); 
clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs)); } return 0; } postcore_initcall(mmp2_init); static void __init mmp2_timer_init(void) { unsigned long clk_rst; __raw_writel(APBC_APBCLK | APBC_RST, APBC_MMP2_TIMERS); /* * enable bus/functional clock, enable 6.5MHz (divider 4), * release reset */ clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(1); __raw_writel(clk_rst, APBC_MMP2_TIMERS); timer_init(IRQ_MMP2_TIMER1); } struct sys_timer mmp2_timer = { .init = mmp2_timer_init, }; /* on-chip devices */ MMP2_DEVICE(uart1, "pxa2xx-uart", 0, UART1, 0xd4030000, 0x30, 4, 5); MMP2_DEVICE(uart2, "pxa2xx-uart", 1, UART2, 0xd4017000, 0x30, 20, 21); MMP2_DEVICE(uart3, "pxa2xx-uart", 2, UART3, 0xd4018000, 0x30, 22, 23); MMP2_DEVICE(uart4, "pxa2xx-uart", 3, UART4, 0xd4016000, 0x30, 18, 19); MMP2_DEVICE(twsi1, "pxa2xx-i2c", 0, TWSI1, 0xd4011000, 0x70); MMP2_DEVICE(twsi2, "pxa2xx-i2c", 1, TWSI2, 0xd4031000, 0x70); MMP2_DEVICE(twsi3, "pxa2xx-i2c", 2, TWSI3, 0xd4032000, 0x70); MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70); MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70); MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29);
gpl-2.0
dennes544/dennes544_kernel_lge_hammerhead
arch/arm/kernel/perf_event.c
99
27871
#undef DEBUG /* * ARM performance counter support. * * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> * * This code is based on the sparc64 perf event code, which is in turn based * on the x86 code. Callchain code is based on the ARM OProfile backtrace * code. */ #define pr_fmt(fmt) "hw perfevents: " fmt #include <linux/bitmap.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/perf_event.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/irq.h> #include <linux/of.h> #include <asm/cputype.h> #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/pmu.h> #include <asm/stacktrace.h> #include <linux/cpu_pm.h> /* * ARMv6 supports a maximum of 3 events, starting from index 0. If we add * another platform that supports more, we need to increase this to be the * largest of all platforms. * * ARMv7 supports up to 32 events: * cycle counter CCNT + 31 events counters CNT0..30. * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. */ #define ARMPMU_MAX_HWEVENTS 32 static DEFINE_PER_CPU(u32, from_idle); static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) /* Set at runtime when we know what CPU type we are. 
*/ static struct arm_pmu *cpu_pmu; static int per_cpu_irq; enum arm_perf_pmu_ids armpmu_get_pmu_id(void) { int id = -ENODEV; if (cpu_pmu != NULL) id = cpu_pmu->id; return id; } EXPORT_SYMBOL_GPL(armpmu_get_pmu_id); int perf_num_counters(void) { int max_events = 0; if (cpu_pmu != NULL) max_events = cpu_pmu->num_events; return max_events; } EXPORT_SYMBOL_GPL(perf_num_counters); #define HW_OP_UNSUPPORTED 0xFFFF #define C(_x) \ PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xFFFF static int armpmu_map_cache_event(unsigned (*cache_map) [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX], u64 config) { unsigned int cache_type, cache_op, cache_result, ret; cache_type = (config >> 0) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return -EINVAL; cache_op = (config >> 8) & 0xff; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return -EINVAL; cache_result = (config >> 16) & 0xff; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; if (ret == CACHE_OP_UNSUPPORTED) return -ENOENT; return ret; } static int armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { int mapping; if (config >= PERF_COUNT_HW_MAX) return -ENOENT; mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? 
-ENOENT : mapping; } static int armpmu_map_raw_event(u32 raw_event_mask, u64 config) { return (int)(config & raw_event_mask); } static int map_cpu_event(struct perf_event *event, const unsigned (*event_map)[PERF_COUNT_HW_MAX], unsigned (*cache_map) [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX], u32 raw_event_mask) { u64 config = event->attr.config; switch (event->attr.type) { case PERF_TYPE_HARDWARE: return armpmu_map_event(event_map, config); case PERF_TYPE_HW_CACHE: return armpmu_map_cache_event(cache_map, config); case PERF_TYPE_RAW: return armpmu_map_raw_event(raw_event_mask, config); } return -ENOENT; } int armpmu_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (left > (s64)armpmu->max_period) left = armpmu->max_period; local64_set(&hwc->prev_count, (u64)-left); armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); perf_event_update_userpage(event); return ret; } u64 armpmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); u64 delta, prev_raw_count, new_raw_count; if (event->state <= PERF_EVENT_STATE_OFF) return 0; again: prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = armpmu->read_counter(idx); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count - prev_raw_count) & armpmu->max_period; local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); return new_raw_count; } static void armpmu_read(struct perf_event *event) { struct 
hw_perf_event *hwc = &event->hw; /* Don't read disabled counters! */ if (hwc->idx < 0) return; armpmu_event_update(event, hwc, hwc->idx); } static void armpmu_stop(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; /* * ARM pmu always has to update the counter, so ignore * PERF_EF_UPDATE, see comments in armpmu_start(). */ if (!(hwc->state & PERF_HES_STOPPED)) { armpmu->disable(hwc, hwc->idx); barrier(); /* why? */ armpmu_event_update(event, hwc, hwc->idx); hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; } } static void armpmu_start(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; /* * ARM pmu always has to reprogram the period, so ignore * PERF_EF_RELOAD, see the comment below. */ if (flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; /* * Set the period again. Some counters can't be stopped, so when we * were stopped we simply disabled the IRQ source and the counter * may have been left counting. If we don't do this step then we may * get an interrupt too soon or *way* too late if the overflow has * happened since disabling. */ armpmu_event_set_period(event, hwc, hwc->idx); armpmu->enable(hwc, hwc->idx, event->cpu); } static void armpmu_del(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu_hw_events *hw_events = armpmu->get_hw_events(); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; WARN_ON(idx < 0); armpmu_stop(event, PERF_EF_UPDATE); hw_events->events[idx] = NULL; clear_bit(idx, hw_events->used_mask); /* Clear event constraints. 
*/ if (armpmu->clear_event_constraints) armpmu->clear_event_constraints(event); perf_event_update_userpage(event); } static int armpmu_add(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu_hw_events *hw_events = armpmu->get_hw_events(); struct hw_perf_event *hwc = &event->hw; int idx; int err = 0; perf_pmu_disable(event->pmu); /* * Tests if event is constrained. If not sets it so that next * collision can be detected. */ if (armpmu->test_set_event_constraints) if (armpmu->test_set_event_constraints(event) < 0) { pr_err("Event: %llx failed constraint check.\n", event->attr.config); event->state = PERF_EVENT_STATE_OFF; goto out; } /* If we don't have a space for the counter then finish early. */ idx = armpmu->get_event_idx(hw_events, hwc); if (idx < 0) { err = idx; goto out; } /* * If there is an event in the counter we are going to use then make * sure it is disabled. */ event->hw.idx = idx; armpmu->disable(hwc, idx); hw_events->events[idx] = event; hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; if (flags & PERF_EF_START) armpmu_start(event, PERF_EF_RELOAD); /* Propagate our changes to the userspace mapping. */ perf_event_update_userpage(event); out: perf_pmu_enable(event->pmu); return err; } static int validate_event(struct pmu_hw_events *hw_events, struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; if (is_software_event(event)) return 1; if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; return armpmu->get_event_idx(hw_events, &fake_event) >= 0; } static int validate_group(struct perf_event *event) { struct perf_event *sibling, *leader = event->group_leader; struct pmu_hw_events fake_pmu; DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); /* * Initialise the fake PMU. We only need to populate the * used_mask for the purposes of validation. 
*/ memset(fake_used_mask, 0, sizeof(fake_used_mask)); fake_pmu.used_mask = fake_used_mask; if (!validate_event(&fake_pmu, leader)) return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { if (!validate_event(&fake_pmu, sibling)) return -EINVAL; } if (!validate_event(&fake_pmu, event)) return -EINVAL; return 0; } static irqreturn_t armpmu_platform_irq(int irq, void *dev) { struct arm_pmu *armpmu = (struct arm_pmu *) dev; struct platform_device *plat_device = armpmu->plat_device; struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev); return plat->handle_irq(irq, dev, armpmu->handle_irq); } static DEFINE_PER_CPU(u32, pmu_irq_cookie); void enable_irq_callback(void *info) { int irq = *(unsigned int *)info; enable_percpu_irq(irq, IRQ_TYPE_EDGE_RISING); } void disable_irq_callback(void *info) { int irq = *(unsigned int *)info; disable_percpu_irq(irq); } int multicore_request_irq(int irq, irq_handler_t *handle_irq) { int err = 0; int cpu; err = request_percpu_irq(irq, *handle_irq, "l1-armpmu", &pmu_irq_cookie); if (!err) { for_each_cpu(cpu, cpu_online_mask) { smp_call_function_single(cpu, enable_irq_callback, &irq, 1); } } return err; } #ifdef CONFIG_SMP static __ref int armpmu_cpu_up(int cpu) { int ret = 0; if (!cpumask_test_cpu(cpu, cpu_online_mask)) { ret = cpu_up(cpu); if (ret) pr_err("Failed to bring up CPU: %d, ret: %d\n", cpu, ret); } return ret; } #else static inline int armpmu_cpu_up(int cpu) { return 0; } #endif void __ref multicore_free_irq(int irq) { int cpu; struct irq_desc *desc = irq_to_desc(irq); if (irq >= 0) { for_each_cpu(cpu, desc->percpu_enabled) { if (!armpmu_cpu_up(cpu)) smp_call_function_single(cpu, disable_irq_callback, &irq, 1); } free_percpu_irq(irq, &pmu_irq_cookie); } } struct arm_pmu_platdata multicore_data = { .request_pmu_irq = multicore_request_irq, .free_pmu_irq = multicore_free_irq, }; int armpmu_generic_request_irq(int irq, irq_handler_t *handle_irq) { return request_irq(irq, *handle_irq, 
IRQF_DISABLED | IRQF_NOBALANCING, "armpmu", NULL); } void armpmu_generic_free_irq(int irq) { if (irq >= 0) free_irq(irq, NULL); } static void armpmu_release_hardware(struct arm_pmu *armpmu) { int i, irq, irqs; struct platform_device *pmu_device = armpmu->plat_device; irqs = min(pmu_device->num_resources, num_possible_cpus()); for (i = 0; i < irqs; ++i) { if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) continue; irq = platform_get_irq(pmu_device, i); armpmu->free_pmu_irq(irq); } release_pmu(armpmu->type); } static int armpmu_reserve_hardware(struct arm_pmu *armpmu) { struct arm_pmu_platdata *plat; irq_handler_t handle_irq; int i, err, irq, irqs; struct platform_device *pmu_device = armpmu->plat_device; if (!pmu_device) return -ENODEV; err = reserve_pmu(armpmu->type); if (err) { pr_warning("unable to reserve pmu\n"); return err; } plat = dev_get_platdata(&pmu_device->dev); if (plat && plat->handle_irq) handle_irq = armpmu_platform_irq; else handle_irq = armpmu->handle_irq; if (plat && plat->request_pmu_irq) armpmu->request_pmu_irq = plat->request_pmu_irq; else if (!armpmu->request_pmu_irq) armpmu->request_pmu_irq = armpmu_generic_request_irq; if (plat && plat->free_pmu_irq) armpmu->free_pmu_irq = plat->free_pmu_irq; else if (!armpmu->free_pmu_irq) armpmu->free_pmu_irq = armpmu_generic_free_irq; irqs = min(pmu_device->num_resources, num_possible_cpus()); if (irqs < 1) { pr_err("no irqs for PMUs defined\n"); return -ENODEV; } for (i = 0; i < irqs; ++i) { err = 0; irq = platform_get_irq(pmu_device, i); if (irq < 0) continue; /* * If we have a single PMU interrupt that we can't shift, * assume that we're running on a uniprocessor machine and * continue. Otherwise, continue without this interrupt. 
*/ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", irq, i); continue; } err = armpmu->request_pmu_irq(irq, &handle_irq); if (err) { pr_warning("unable to request IRQ%d for %s perf " "counters\n", irq, armpmu->name); armpmu_release_hardware(cpu_pmu); return err; } cpumask_set_cpu(i, &armpmu->active_irqs); } return 0; } static void hw_perf_event_destroy(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); atomic_t *active_events = &armpmu->active_events; struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { armpmu_release_hardware(armpmu); mutex_unlock(pmu_reserve_mutex); } } static int event_requires_mode_exclusion(struct perf_event_attr *attr) { return attr->exclude_idle || attr->exclude_user || attr->exclude_kernel || attr->exclude_hv; } static int __hw_perf_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int mapping, err; mapping = armpmu->map_event(event); if (mapping < 0) { pr_debug("event %x:%llx not supported\n", event->attr.type, event->attr.config); return mapping; } /* * We don't assign an index until we actually place the event onto * hardware. Use -1 to signify that we haven't decided where to put it * yet. For SMP systems, each core has it's own PMU so we can't do any * clever allocation or constraints checking at this point. */ hwc->idx = -1; hwc->config_base = 0; hwc->config = 0; hwc->event_base = 0; /* * Check whether we need to exclude the counter from certain modes. */ if ((!armpmu->set_event_filter || armpmu->set_event_filter(hwc, &event->attr)) && event_requires_mode_exclusion(&event->attr)) { pr_debug("ARM performance counters do not support " "mode exclusion\n"); return -EPERM; } /* * Store the event encoding into the config_base field. 
*/ hwc->config_base |= (unsigned long)mapping; if (!hwc->sample_period) { /* * For non-sampling runs, limit the sample_period to half * of the counter width. That way, the new counter value * is far less likely to overtake the previous one unless * you have some serious IRQ latency issues. */ hwc->sample_period = armpmu->max_period >> 1; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } err = 0; if (event->group_leader != event) { err = validate_group(event); if (err) return -EINVAL; } return err; } static int armpmu_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); int err = 0; atomic_t *active_events = &armpmu->active_events; /* does not support taken branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; if (armpmu->map_event(event) == -ENOENT) return -ENOENT; event->destroy = hw_perf_event_destroy; if (!atomic_inc_not_zero(active_events)) { mutex_lock(&armpmu->reserve_mutex); if (atomic_read(active_events) == 0) err = armpmu_reserve_hardware(armpmu); if (!err) atomic_inc(active_events); mutex_unlock(&armpmu->reserve_mutex); } if (err) return err; err = __hw_perf_event_init(event); if (err) hw_perf_event_destroy(event); return err; } static void armpmu_enable(struct pmu *pmu) { struct arm_pmu *armpmu = to_arm_pmu(pmu); struct pmu_hw_events *hw_events = armpmu->get_hw_events(); int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); int idx; if (__get_cpu_var(from_idle)) { for (idx = 0; idx <= cpu_pmu->num_events; ++idx) { struct perf_event *event = hw_events->events[idx]; if (!event) continue; armpmu->enable(&event->hw, idx, event->cpu); } /* Reset bit so we don't needlessly re-enable counters.*/ __get_cpu_var(from_idle) = 0; } /* So we don't start the PMU before enabling counters after idle. 
*/ barrier(); if (enabled) armpmu->start(); } static void armpmu_disable(struct pmu *pmu) { struct arm_pmu *armpmu = to_arm_pmu(pmu); armpmu->stop(); } static void armpmu_init(struct arm_pmu *armpmu) { atomic_set(&armpmu->active_events, 0); mutex_init(&armpmu->reserve_mutex); armpmu->pmu.pmu_enable = armpmu_enable; armpmu->pmu.pmu_disable = armpmu_disable; armpmu->pmu.event_init = armpmu_event_init; armpmu->pmu.add = armpmu_add; armpmu->pmu.del = armpmu_del; armpmu->pmu.start = armpmu_start; armpmu->pmu.stop = armpmu_stop; armpmu->pmu.read = armpmu_read; armpmu->pmu.events_across_hotplug = 1; } int armpmu_register(struct arm_pmu *armpmu, char *name, int type) { armpmu_init(armpmu); return perf_pmu_register(&armpmu->pmu, name, type); } /* Include the PMU-specific implementations. */ #include "perf_event_xscale.c" #include "perf_event_v6.c" #include "perf_event_v7.c" #include "perf_event_msm_krait.c" #include "perf_event_msm.c" /* * Ensure the PMU has sane values out of reset. * This requires SMP to be available, so exists as a separate initcall. */ static int __init cpu_pmu_reset(void) { if (cpu_pmu && cpu_pmu->reset) return on_each_cpu(cpu_pmu->reset, NULL, 1); return 0; } arch_initcall(cpu_pmu_reset); /* * PMU platform driver and devicetree bindings. 
*/ static struct of_device_id armpmu_of_device_ids[] = { {.compatible = "arm,cortex-a9-pmu"}, {.compatible = "arm,cortex-a8-pmu"}, {.compatible = "arm,cortex-a7-pmu"}, {.compatible = "arm,cortex-a5-pmu"}, {.compatible = "arm,arm1136-pmu"}, {.compatible = "arm,arm1176-pmu"}, {.compatible = "qcom,krait-pmu"}, {}, }; static struct platform_device_id armpmu_plat_device_ids[] = { {.name = "cpu-pmu"}, {}, }; static int __devinit armpmu_device_probe(struct platform_device *pdev) { if (!cpu_pmu) return -ENODEV; cpu_pmu->plat_device = pdev; if (per_cpu_irq == 1) cpu_pmu->plat_device->dev.platform_data = &multicore_data; return 0; } static struct platform_driver armpmu_driver = { .driver = { .name = "cpu-pmu", .of_match_table = armpmu_of_device_ids, }, .probe = armpmu_device_probe, .id_table = armpmu_plat_device_ids, }; static int __init register_pmu_driver(void) { return platform_driver_register(&armpmu_driver); } device_initcall(register_pmu_driver); static struct pmu_hw_events *armpmu_get_cpu_events(void) { return &__get_cpu_var(cpu_hw_events); } static void __init cpu_pmu_init(struct arm_pmu *armpmu) { int cpu; for_each_possible_cpu(cpu) { struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); events->events = per_cpu(hw_events, cpu); events->used_mask = per_cpu(used_mask, cpu); raw_spin_lock_init(&events->pmu_lock); } armpmu->get_hw_events = armpmu_get_cpu_events; armpmu->type = ARM_PMU_DEVICE_CPU; } static int cpu_has_active_perf(int cpu) { struct pmu_hw_events *hw_events; int enabled; if (!cpu_pmu) return 0; hw_events = &per_cpu(cpu_hw_events, cpu); enabled = bitmap_weight(hw_events->used_mask, cpu_pmu->num_events); if (enabled) /*Even one event's existence is good enough.*/ return 1; return 0; } static void armpmu_update_counters(void) { struct pmu_hw_events *hw_events; int idx; if (!cpu_pmu) return; hw_events = cpu_pmu->get_hw_events(); for (idx = 0; idx <= cpu_pmu->num_events; ++idx) { struct perf_event *event = hw_events->events[idx]; if (!event) continue; 
armpmu_read(event); } } /* * PMU hardware loses all context when a CPU goes offline. * When a CPU is hotplugged back in, since some hardware registers are * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading * junk values out of them. */ static int __cpuinit pmu_cpu_notify(struct notifier_block *b, unsigned long action, void *hcpu) { int irq; struct pmu *pmu; int cpu = (int)hcpu; switch ((action & ~CPU_TASKS_FROZEN)) { case CPU_DOWN_PREPARE: if (cpu_pmu && cpu_pmu->save_pm_registers) smp_call_function_single(cpu, cpu_pmu->save_pm_registers, hcpu, 1); break; case CPU_STARTING: if (cpu_pmu && cpu_pmu->restore_pm_registers) smp_call_function_single(cpu, cpu_pmu->restore_pm_registers, hcpu, 1); } if (cpu_has_active_perf((int)hcpu)) { switch ((action & ~CPU_TASKS_FROZEN)) { case CPU_DOWN_PREPARE: armpmu_update_counters(); /* * If this is on a multicore CPU, we need * to disarm the PMU IRQ before disappearing. */ if (cpu_pmu && cpu_pmu->plat_device->dev.platform_data) { irq = platform_get_irq(cpu_pmu->plat_device, 0); smp_call_function_single((int)hcpu, disable_irq_callback, &irq, 1); } return NOTIFY_DONE; case CPU_STARTING: /* * If this is on a multicore CPU, we need * to arm the PMU IRQ before appearing. 
*/ if (cpu_pmu && cpu_pmu->plat_device->dev.platform_data) { irq = platform_get_irq(cpu_pmu->plat_device, 0); enable_irq_callback(&irq); } if (cpu_pmu && cpu_pmu->reset) { __get_cpu_var(from_idle) = 1; cpu_pmu->reset(NULL); pmu = &cpu_pmu->pmu; pmu->pmu_enable(pmu); return NOTIFY_OK; } default: return NOTIFY_DONE; } } if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) return NOTIFY_DONE; return NOTIFY_OK; } static struct notifier_block __cpuinitdata pmu_cpu_notifier = { .notifier_call = pmu_cpu_notify, }; /*TODO: Unify with pending patch from ARM */ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v) { struct pmu *pmu; switch (cmd) { case CPU_PM_ENTER: if (cpu_pmu && cpu_pmu->save_pm_registers) cpu_pmu->save_pm_registers((void *)smp_processor_id()); if (cpu_has_active_perf((int)v)) { armpmu_update_counters(); pmu = &cpu_pmu->pmu; pmu->pmu_disable(pmu); } break; case CPU_PM_ENTER_FAILED: case CPU_PM_EXIT: if (cpu_pmu && cpu_pmu->restore_pm_registers) cpu_pmu->restore_pm_registers( (void *)smp_processor_id()); if (cpu_has_active_perf((int)v) && cpu_pmu->reset) { /* * Flip this bit so armpmu_enable knows it needs * to re-enable active counters. */ __get_cpu_var(from_idle) = 1; cpu_pmu->reset(NULL); pmu = &cpu_pmu->pmu; pmu->pmu_enable(pmu); } break; } return NOTIFY_OK; } static struct notifier_block perf_cpu_pm_notifier_block = { .notifier_call = perf_cpu_pm_notifier, }; #ifdef CONFIG_OF static inline int get_dt_irq_prop(void) { struct device_node *np = NULL; int err = -1; np = of_find_matching_node(NULL, armpmu_of_device_ids); if (np) err = of_property_read_bool(np, "qcom,irq-is-percpu"); else pr_err("Perf: can't find DT node.\n"); return err; } #else static inline int get_dt_irq_prop(void) {return 0; } #endif /* * CPU PMU identification and registration. 
*/ static int __init init_hw_perf_events(void) { unsigned long cpuid = read_cpuid_id(); unsigned long implementor = (cpuid & 0xFF000000) >> 24; unsigned long part_number = (cpuid & 0xFFF0); /* ARM Ltd CPUs. */ if (0x41 == implementor) { switch (part_number) { case 0xB360: /* ARM1136 */ case 0xB560: /* ARM1156 */ case 0xB760: /* ARM1176 */ cpu_pmu = armv6pmu_init(); break; case 0xB020: /* ARM11mpcore */ cpu_pmu = armv6mpcore_pmu_init(); break; case 0xC080: /* Cortex-A8 */ cpu_pmu = armv7_a8_pmu_init(); break; case 0xC090: /* Cortex-A9 */ cpu_pmu = armv7_a9_pmu_init(); break; case 0xC050: /* Cortex-A5 */ cpu_pmu = armv7_a5_pmu_init(); break; case 0xC0F0: /* Cortex-A15 */ cpu_pmu = armv7_a15_pmu_init(); break; case 0xC070: /* Cortex-A7 */ cpu_pmu = armv7_a7_pmu_init(); break; } /* Intel CPUs [xscale]. */ } else if (0x69 == implementor) { part_number = (cpuid >> 13) & 0x7; switch (part_number) { case 1: cpu_pmu = xscale1pmu_init(); break; case 2: cpu_pmu = xscale2pmu_init(); break; } /* Qualcomm CPUs */ } else if (0x51 == implementor) { switch (part_number) { case 0x00F0: /* 8x50 & 7x30*/ cpu_pmu = armv7_scorpion_pmu_init(); break; case 0x02D0: /* 8x60 */ // fabricmon_pmu_init(); cpu_pmu = armv7_scorpionmp_pmu_init(); break; case 0x0490: /* 8960 sim */ case 0x04D0: /* 8960 */ case 0x06F0: /* 8064 */ // fabricmon_pmu_init(); cpu_pmu = armv7_krait_pmu_init(); break; } } if (cpu_pmu) { pr_info("enabled with %s PMU driver, %d counters available\n", cpu_pmu->name, cpu_pmu->num_events); cpu_pmu_init(cpu_pmu); register_cpu_notifier(&pmu_cpu_notifier); armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); cpu_pm_register_notifier(&perf_cpu_pm_notifier_block); per_cpu_irq = get_dt_irq_prop(); } else { pr_info("no hardware support available\n"); } return 0; } early_initcall(init_hw_perf_events); /* * Callchain handling code. */ /* * The registers we're interested in are at the end of the variable * length saved register structure. 
The fp points at the end of this * structure so the address of this struct is: * (struct frame_tail *)(xxx->fp)-1 * * This code has been adapted from the ARM OProfile support. */ struct frame_tail { struct frame_tail __user *fp; unsigned long sp; unsigned long lr; } __attribute__((packed)); /* * Get the return address for a single stackframe and return a pointer to the * next frame tail. */ static struct frame_tail __user * user_backtrace(struct frame_tail __user *tail, struct perf_callchain_entry *entry) { struct frame_tail buftail; /* Also check accessibility of one struct frame_tail beyond */ if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) return NULL; if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) return NULL; perf_callchain_store(entry, buftail.lr); /* * Frame pointers should strictly progress back up the stack * (towards higher addresses). */ if (tail + 1 >= buftail.fp) return NULL; return buftail.fp - 1; } void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct frame_tail __user *tail; tail = (struct frame_tail __user *)regs->ARM_fp - 1; while ((entry->nr < PERF_MAX_STACK_DEPTH) && tail && !((unsigned long)tail & 0x3)) tail = user_backtrace(tail, entry); } /* * Gets called by walk_stackframe() for every stackframe. This will be called * whist unwinding the stackframe and is like a subroutine return so we use * the PC. */ static int callchain_trace(struct stackframe *fr, void *data) { struct perf_callchain_entry *entry = data; perf_callchain_store(entry, fr->pc); return 0; } void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct stackframe fr; fr.fp = regs->ARM_fp; fr.sp = regs->ARM_sp; fr.lr = regs->ARM_lr; fr.pc = regs->ARM_pc; walk_stackframe(&fr, callchain_trace, entry); }
gpl-2.0
ignatenkobrain/linux
arch/mips/mm/tlb-r3k.c
355
6516
/* * r2300.c: R2000 and R3000 specific mmu/cache code. * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * * with a lot of changes to make this thing work for R3000s * Tx39XX R4k style caches added. HK * Copyright (C) 1998, 1999, 2000 Harald Koerfgen * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov * Copyright (C) 2002 Ralf Baechle * Copyright (C) 2002 Maciej W. Rozycki */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/mm.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/tlbmisc.h> #include <asm/isadep.h> #include <asm/io.h> #include <asm/bootinfo.h> #include <asm/cpu.h> #undef DEBUG_TLB extern void build_tlb_refill_handler(void); /* CP0 hazard avoidance. */ #define BARRIER \ __asm__ __volatile__( \ ".set push\n\t" \ ".set noreorder\n\t" \ "nop\n\t" \ ".set pop\n\t") int r3k_have_wired_reg; /* should be in cpu_data? */ /* TLB operations. */ void local_flush_tlb_all(void) { unsigned long flags; unsigned long old_ctx; int entry; #ifdef DEBUG_TLB printk("[tlball]"); #endif local_irq_save(flags); old_ctx = read_c0_entryhi() & ASID_MASK; write_c0_entrylo0(0); entry = r3k_have_wired_reg ? 
read_c0_wired() : 8; for (; entry < current_cpu_data.tlbsize; entry++) { write_c0_index(entry << 8); write_c0_entryhi((entry | 0x80000) << 12); BARRIER; tlb_write_indexed(); } write_c0_entryhi(old_ctx); local_irq_restore(flags); } void local_flush_tlb_mm(struct mm_struct *mm) { int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { #ifdef DEBUG_TLB printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm)); #endif drop_mmu_context(mm, cpu); } } void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) { unsigned long size, flags; #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", cpu_context(cpu, mm) & ASID_MASK, start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= current_cpu_data.tlbsize) { int oldpid = read_c0_entryhi() & ASID_MASK; int newpid = cpu_context(cpu, mm) & ASID_MASK; start &= PAGE_MASK; end += PAGE_SIZE - 1; end &= PAGE_MASK; while (start < end) { int idx; write_c0_entryhi(start | newpid); start += PAGE_SIZE; /* BARRIER */ tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entryhi(KSEG0); if (idx < 0) /* BARRIER */ continue; tlb_write_indexed(); } write_c0_entryhi(oldpid); } else { drop_mmu_context(mm, cpu); } local_irq_restore(flags); } } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { unsigned long size, flags; #ifdef DEBUG_TLB printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end); #endif local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size <= current_cpu_data.tlbsize) { int pid = read_c0_entryhi(); start &= PAGE_MASK; end += PAGE_SIZE - 1; end &= PAGE_MASK; while (start < end) { int idx; write_c0_entryhi(start); start += PAGE_SIZE; /* BARRIER */ tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entryhi(KSEG0); if (idx < 0) /* BARRIER */ continue; 
tlb_write_indexed(); } write_c0_entryhi(pid); } else { local_flush_tlb_all(); } local_irq_restore(flags); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { int cpu = smp_processor_id(); if (!vma || cpu_context(cpu, vma->vm_mm) != 0) { unsigned long flags; int oldpid, newpid, idx; #ifdef DEBUG_TLB printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); #endif newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; page &= PAGE_MASK; local_irq_save(flags); oldpid = read_c0_entryhi() & ASID_MASK; write_c0_entryhi(page | newpid); BARRIER; tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(0); write_c0_entryhi(KSEG0); if (idx < 0) /* BARRIER */ goto finish; tlb_write_indexed(); finish: write_c0_entryhi(oldpid); local_irq_restore(flags); } } void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { unsigned long flags; int idx, pid; /* * Handle debugger faulting in for debugee. */ if (current->active_mm != vma->vm_mm) return; pid = read_c0_entryhi() & ASID_MASK; #ifdef DEBUG_TLB if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", (cpu_context(cpu, vma->vm_mm)), pid); } #endif local_irq_save(flags); address &= PAGE_MASK; write_c0_entryhi(address | pid); BARRIER; tlb_probe(); idx = read_c0_index(); write_c0_entrylo0(pte_val(pte)); write_c0_entryhi(address | pid); if (idx < 0) { /* BARRIER */ tlb_write_random(); } else { tlb_write_indexed(); } write_c0_entryhi(pid); local_irq_restore(flags); } void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, unsigned long entryhi, unsigned long pagemask) { unsigned long flags; unsigned long old_ctx; static unsigned long wired = 0; if (r3k_have_wired_reg) { /* TX39XX */ unsigned long old_pagemask; unsigned long w; #ifdef DEBUG_TLB printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n", entrylo0, entryhi, pagemask); #endif 
local_irq_save(flags); /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi() & ASID_MASK; old_pagemask = read_c0_pagemask(); w = read_c0_wired(); write_c0_wired(w + 1); write_c0_index(w << 8); write_c0_pagemask(pagemask); write_c0_entryhi(entryhi); write_c0_entrylo0(entrylo0); BARRIER; tlb_write_indexed(); write_c0_entryhi(old_ctx); write_c0_pagemask(old_pagemask); local_flush_tlb_all(); local_irq_restore(flags); } else if (wired < 8) { #ifdef DEBUG_TLB printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n", entrylo0, entryhi); #endif local_irq_save(flags); old_ctx = read_c0_entryhi() & ASID_MASK; write_c0_entrylo0(entrylo0); write_c0_entryhi(entryhi); write_c0_index(wired); wired++; /* BARRIER */ tlb_write_indexed(); write_c0_entryhi(old_ctx); local_flush_tlb_all(); local_irq_restore(flags); } } void tlb_init(void) { local_flush_tlb_all(); build_tlb_refill_handler(); }
gpl-2.0
Chad0989/incredikernel
arch/x86/boot/compressed/mkpiggy.c
611
2782
/* ----------------------------------------------------------------------- *
 *
 *   Copyright (C) 2009 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License version
 *   2 as published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 *   02110-1301, USA.
 *
 *   H. Peter Anvin <hpa@linux.intel.com>
 *
 * ----------------------------------------------------------------------- */

/*
 * Compute the desired load offset from a compressed program; outputs
 * a small assembly wrapper with the appropriate symbols defined.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

/* Decode a little-endian 32-bit value independently of host endianness. */
static uint32_t getle32(const void *p)
{
	const uint8_t *cp = p;

	return (uint32_t)cp[0] + ((uint32_t)cp[1] << 8) +
		((uint32_t)cp[2] << 16) + ((uint32_t)cp[3] << 24);
}

int main(int argc, char *argv[])
{
	uint32_t olen;
	long ilen;
	unsigned long offs;
	FILE *f = NULL;
	int retval = 1;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
		goto bail;
	}

	/* Get the information for the compressed kernel image first */

	f = fopen(argv[1], "r");
	if (!f) {
		perror(argv[1]);
		goto bail;
	}

	/*
	 * The uncompressed length is stored little-endian in the last
	 * four bytes of the compressed stream (the gzip ISIZE field).
	 *
	 * Previously a failed fseek() was only reported via perror() and
	 * execution continued with a stale buffer; likewise the fread()
	 * result was ignored.  Bail out on either failure instead.
	 */
	if (fseek(f, -4L, SEEK_END)) {
		perror(argv[1]);
		goto bail;
	}

	if (fread(&olen, sizeof(olen), 1, f) != 1) {
		perror(argv[1]);
		goto bail;
	}

	ilen = ftell(f);
	olen = getle32(&olen);

	/*
	 * Now we have the input (compressed) and output (uncompressed)
	 * sizes, compute the necessary decompression offset...
	 */

	offs = (olen > ilen) ? olen - ilen : 0;
	offs += olen >> 12;	/* Add 8 bytes for each 32K block */
	offs += 32*1024 + 18;	/* Add 32K + 18 bytes slack */
	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */

	printf(".section \".rodata.compressed\",\"a\",@progbits\n");
	printf(".globl z_input_len\n");
	/* cast: ilen is long, %lu requires unsigned long (was a format-type
	 * mismatch, which is undefined behavior) */
	printf("z_input_len = %lu\n", (unsigned long)ilen);
	printf(".globl z_output_len\n");
	printf("z_output_len = %lu\n", (unsigned long)olen);
	printf(".globl z_extract_offset\n");
	printf("z_extract_offset = 0x%lx\n", offs);
	/* z_extract_offset_negative allows simplification of head_32.S */
	printf(".globl z_extract_offset_negative\n");
	printf("z_extract_offset_negative = -0x%lx\n", offs);

	printf(".globl input_data, input_data_end\n");
	printf("input_data:\n");
	printf(".incbin \"%s\"\n", argv[1]);
	printf("input_data_end:\n");

	retval = 0;
bail:
	if (f)
		fclose(f);

	return retval;
}
gpl-2.0
fileton/linux
net/ipv4/netfilter/iptable_filter.c
611
2946
/*
 * This is the 1999 rewrite of IP Firewalling, aiming for kernel 2.3.x.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/slab.h>
#include <net/ip.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("iptables filter table");

/* The "filter" table attaches at the three points where filtering
 * verdicts are meaningful: local input, forward and local output. */
#define FILTER_VALID_HOOKS ((1 << NF_INET_LOCAL_IN) | \
			    (1 << NF_INET_FORWARD) | \
			    (1 << NF_INET_LOCAL_OUT))

static const struct xt_table packet_filter = {
	.name		= "filter",
	.valid_hooks	= FILTER_VALID_HOOKS,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV4,
	.priority	= NF_IP_PRI_FILTER,
};

/* Netfilter hook entry point: delegates the real work to ipt_do_table()
 * against this netns's filter table. */
static unsigned int
iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
		    const struct nf_hook_state *state)
{
	const struct net *net;

	if (ops->hooknum == NF_INET_LOCAL_OUT &&
	    (skb->len < sizeof(struct iphdr) ||
	     ip_hdrlen(skb) < sizeof(struct iphdr)))
		/* root is playing with raw sockets. */
		return NF_ACCEPT;

	/* Derive the network namespace from whichever device this hook
	 * saw the packet on (input or output side). */
	net = dev_net(state->in ? state->in : state->out);
	return ipt_do_table(skb, ops->hooknum, state,
			    net->ipv4.iptable_filter);
}

static struct nf_hook_ops *filter_ops __read_mostly;

/* Default to forward because I got too much mail already. */
static bool forward = true;
module_param(forward, bool, 0000);

/* Per-netns init: build the initial table and apply the "forward"
 * module parameter as the FORWARD chain's default verdict. */
static int __net_init iptable_filter_net_init(struct net *net)
{
	struct ipt_replace *repl;

	repl = ipt_alloc_initial_table(&packet_filter);
	if (repl == NULL)
		return -ENOMEM;
	/* Entry 1 is the FORWARD hook */
	((struct ipt_standard *)repl->entries)[1].target.verdict =
		forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;

	net->ipv4.iptable_filter =
		ipt_register_table(net, &packet_filter, repl);
	kfree(repl);
	return PTR_ERR_OR_ZERO(net->ipv4.iptable_filter);
}

static void __net_exit iptable_filter_net_exit(struct net *net)
{
	ipt_unregister_table(net, net->ipv4.iptable_filter);
}

static struct pernet_operations iptable_filter_net_ops = {
	.init = iptable_filter_net_init,
	.exit = iptable_filter_net_exit,
};

static int __init iptable_filter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&iptable_filter_net_ops);
	if (ret < 0)
		return ret;

	/* Register hooks */
	filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
	if (IS_ERR(filter_ops)) {
		ret = PTR_ERR(filter_ops);
		/* roll back the pernet registration on hook failure */
		unregister_pernet_subsys(&iptable_filter_net_ops);
	}

	return ret;
}

static void __exit iptable_filter_fini(void)
{
	xt_hook_unlink(&packet_filter, filter_ops);
	unregister_pernet_subsys(&iptable_filter_net_ops);
}

module_init(iptable_filter_init);
module_exit(iptable_filter_fini);
gpl-2.0
CSE3320/kernel-code
.backup_do_not_remove/arch/arm/kernel/xscale-cp0.c
611
3976
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/xscale-cp0.c
 *
 * XScale DSP and iWMMXt coprocessor context switching and handling
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
#include <asm/cputype.h>

asm(" .arch armv5te\n");

/* Save the 64-bit DSP accumulator acc0 into state[0]/state[1]. */
static inline void dsp_save_state(u32 *state)
{
	__asm__ __volatile__ (
		"mrrc p0, 0, %0, %1, c0\n"
		: "=r" (state[0]), "=r" (state[1]));
}

/* Restore the 64-bit DSP accumulator acc0 from state[0]/state[1]. */
static inline void dsp_load_state(u32 *state)
{
	__asm__ __volatile__ (
		"mcrr p0, 0, %0, %1, c0\n"
		: : "r" (state[0]), "r" (state[1]));
}

/* Thread notifier for the DSP case: zero the saved acc0 words on thread
 * flush, and swap acc0 eagerly on every context switch. */
static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		thread->cpu_context.extra[0] = 0;
		thread->cpu_context.extra[1] = 0;
		break;

	case THREAD_NOTIFY_SWITCH:
		dsp_save_state(current_thread_info()->cpu_context.extra);
		dsp_load_state(thread->cpu_context.extra);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsp_notifier_block = {
	.notifier_call = dsp_do,
};

#ifdef CONFIG_IWMMXT
/* Thread notifier for the iWMMXt case: release or switch the lazily
 * managed iWMMXt context. */
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/*
		 * flush_thread() zeroes thread->fpstate, so no need
		 * to do anything here.
		 *
		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
		 * initialised state information on the first fault.
		 */

	case THREAD_NOTIFY_EXIT:
		iwmmxt_task_release(thread);
		break;

	case THREAD_NOTIFY_SWITCH:
		iwmmxt_task_switch(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block iwmmxt_notifier_block = {
	.notifier_call = iwmmxt_do,
};
#endif

/* Read the CP15 coprocessor access register. */
static u32 __init xscale_cp_access_read(void)
{
	u32 value;

	__asm__ __volatile__ (
		"mrc p15, 0, %0, c15, c1, 0\n\t"
		: "=r" (value));

	return value;
}

/* Write the CP15 coprocessor access register.  The trailing read-back,
 * mov and "sub pc, pc, #4" appear to serialise the pipeline so the new
 * access rights take effect immediately -- NOTE(review): presumed from
 * the XScale manuals, confirm against the core documentation. */
static void __init xscale_cp_access_write(u32 value)
{
	u32 temp;

	__asm__ __volatile__ (
		"mcr p15, 0, %1, c15, c1, 0\n\t"
		"mrc p15, 0, %0, c15, c1, 0\n\t"
		"mov %0, %0\n\t"
		"sub pc, pc, #4\n\t"
		: "=r" (temp) : "r" (value));
}

/*
 * Detect whether we have a MAC coprocessor (40 bit register) or an
 * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000
 * into a coprocessor register and reading it back, and checking
 * whether the upper word survived intact.
 */
static int __init cpu_has_iwmmxt(void)
{
	u32 lo;
	u32 hi;

	/*
	 * This sequence is interpreted by the DSP coprocessor as:
	 *	mar	acc0, %2, %3
	 *	mra	%0, %1, acc0
	 *
	 * And by the iWMMXt coprocessor as:
	 *	tmcrr	wR0, %2, %3
	 *	tmrrc	%0, %1, wR0
	 */
	__asm__ __volatile__ (
		"mcrr p0, 0, %2, %3, c0\n"
		"mrrc p0, 0, %0, %1, c0\n"
		: "=r" (lo), "=r" (hi)
		: "r" (0), "r" (0x100));

	/* Non-zero upper word means the 64-bit register survived: iWMMXt. */
	return !!hi;
}

/*
 * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
 * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
 * switch code handle iWMMXt context switching.  If on the other
 * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled
 * all the time, and save/restore acc0 on context switch in non-lazy
 * fashion.
 */
static int __init xscale_cp0_init(void)
{
	u32 cp_access;

	/* do not attempt to probe iwmmxt on non-xscale family CPUs */
	if (!cpu_is_xscale_family())
		return 0;

	/* temporarily enable CP0 so the probe below can touch it */
	cp_access = xscale_cp_access_read() & ~3;
	xscale_cp_access_write(cp_access | 1);

	if (cpu_has_iwmmxt()) {
#ifndef CONFIG_IWMMXT
		pr_warn("CAUTION: XScale iWMMXt coprocessor detected, but kernel support is missing.\n");
#else
		pr_info("XScale iWMMXt coprocessor detected.\n");
		elf_hwcap |= HWCAP_IWMMXT;
		thread_register_notifier(&iwmmxt_notifier_block);
#endif
	} else {
		pr_info("XScale DSP coprocessor detected.\n");
		thread_register_notifier(&dsp_notifier_block);
		/* keep CP0 permanently enabled for non-lazy acc0 switching */
		cp_access |= 1;
	}

	xscale_cp_access_write(cp_access);

	return 0;
}

late_initcall(xscale_cp0_init);
gpl-2.0
armStrapTools/linux-sunxi-ap6210
arch/frv/kernel/process.c
1123
8199
/* process.c: FRV specific parts of process handling
 *
 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from arch/m68k/kernel/process.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>

#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/gdb-stub.h>
#include <asm/mb-regs.h>

#include "local.h"

asmlinkage void ret_from_fork(void);

#include <asm/pgalloc.h>

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/* Put the CPU core to sleep until the next interrupt; optionally mark
 * the sleep period on the board LEDs for debugging. */
static void core_sleep_idle(void)
{
#ifdef LED_DEBUG_SLEEP
	/* Show that we're sleeping... */
	__set_LEDS(0x55aa);
#endif
	frv_cpu_core_sleep();
#ifdef LED_DEBUG_SLEEP
	/* ... and that we woke up */
	__set_LEDS(0);
#endif
	mb();
}

/* Board/platform code may override the idle routine. */
void (*idle)(void) = core_sleep_idle;

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		rcu_idle_enter();
		while (!need_resched()) {
			check_pgt_cache();

			/* don't sleep while a DMA transfer is in flight */
			if (!frv_dma_inprogress && idle)
				idle();
		}
		rcu_idle_exit();

		schedule_preempt_disabled();
	}
}

void machine_restart(char * __unused)
{
	unsigned long reset_addr;
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	/* the software-reset register lives at a different address on
	 * the FR551 implementation */
	if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551)
		reset_addr = 0xfefff500;
	else
		reset_addr = 0xfeff0500;

	/* Software reset. */
	asm volatile(" dcef @(gr0,gr0),1 ! membar !"
		     " sti %1,@(%0,0) !"
		     " nop ! nop ! nop ! nop ! nop ! "
		     " nop ! nop ! nop ! nop ! nop ! "
		     " nop ! nop ! nop ! nop ! nop ! "
		     " nop ! nop ! nop ! nop ! nop ! "
		     : : "r" (reset_addr), "r" (1) );

	for (;;)
		;
}

void machine_halt(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	for (;;);
}

void machine_power_off(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	for (;;);
}

void flush_thread(void)
{
	/* nothing */
}

/* Walk to the outermost (user) frame and return its stack pointer, or 0
 * if the outermost frame is not a user-mode frame. */
inline unsigned long user_stack(const struct pt_regs *regs)
{
	while (regs->next_frame)
		regs = regs->next_frame;
	return user_mode(regs) ? regs->sp : 0;
}

asmlinkage int sys_fork(void)
{
#ifndef CONFIG_MMU
	/* fork almost works, enough to trick you into looking elsewhere:-( */
	return -EINVAL;
#else
	return do_fork(SIGCHLD, user_stack(__frame), __frame, 0, NULL, NULL);
#endif
}

asmlinkage int sys_vfork(void)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, user_stack(__frame),
		       __frame, 0, NULL, NULL);
}

/*****************************************************************************/
/*
 * clone a process
 * - tlsptr is retrieved by copy_thread()
 */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 int __user *parent_tidptr, int __user *child_tidptr,
			 int __user *tlsptr)
{
	if (!newsp)
		newsp = user_stack(__frame);
	return do_fork(clone_flags, newsp, __frame, 0,
		       parent_tidptr, child_tidptr);
} /* end sys_clone() */

/*****************************************************************************/
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	//unlazy_fpu(tsk);
} /* end prepare_to_copy() */

/*****************************************************************************/
/*
 * set up the kernel stack and exception frames for a new process
 */
int copy_thread(unsigned long clone_flags,
		unsigned long usp, unsigned long topstk,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs0, *childregs, *regs0;

	regs0 = __kernel_frame0_ptr;
	/* frame0 sits at the very top of the child's kernel stack */
	childregs0 = (struct pt_regs *)
		(task_stack_page(p) + THREAD_SIZE - FRV_FRAME0_SIZE);
	childregs = childregs0;

	/* set up the userspace frame (the only place that the USP is stored) */
	*childregs0 = *regs0;

	childregs0->gr8 = 0;	/* child's fork() return value */
	childregs0->sp = usp;
	childregs0->next_frame = NULL;

	/* set up the return kernel frame if called from kernel_thread() */
	if (regs != regs0) {
		childregs--;
		*childregs = *regs;
		childregs->sp = (unsigned long) childregs0;
		childregs->next_frame = childregs0;
		childregs->gr15 = (unsigned long) task_thread_info(p);
		childregs->gr29 = (unsigned long) p;
	}

	p->set_child_tid = p->clear_child_tid = NULL;

	p->thread.frame = childregs;
	p->thread.curr = p;
	p->thread.sp = (unsigned long) childregs;
	p->thread.fp = 0;
	p->thread.lr = 0;
	p->thread.pc = (unsigned long) ret_from_fork;
	p->thread.frame0 = childregs0;

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		childregs->gr29 = childregs->gr12;

	save_user_regs(p->thread.user);

	return 0;
} /* end copy_thread() */

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(const char __user *name,
			  const char __user *const __user *argv,
			  const char __user *const __user *envp)
{
	int error;
	char * filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, __frame);
	putname(filename);
	return error;
}

/* Return the PC at which a sleeping task is waiting, by walking its
 * saved frame-pointer chain; 0 if it cannot be determined. */
unsigned long get_wchan(struct task_struct *p)
{
	struct pt_regs *regs0;
	unsigned long fp, pc;
	unsigned long stack_limit;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_limit = (unsigned long) (p + 1);
	fp = p->thread.fp;
	regs0 = p->thread.frame0;

	do {
		/* sanity-check the frame pointer stays on the stack and
		 * is word-aligned before dereferencing it */
		if (fp < stack_limit || fp >= (unsigned long) regs0 || fp & 3)
			return 0;

		pc = ((unsigned long *) fp)[2];

		/* FIXME: This depends on the order of these functions. */
		if (!in_sched_functions(pc))
			return pc;

		fp = *(unsigned long *) fp;
	} while (count++ < 16);

	return 0;
}

unsigned long thread_saved_pc(struct task_struct *tsk)
{
	/* Check whether the thread is blocked in resume() */
	if (in_sched_functions(tsk->thread.pc))
		return ((unsigned long *)tsk->thread.fp)[2];
	else
		return tsk->thread.pc;
}

/* Validate that an ELF binary's flags are compatible with the GPR/FPR
 * configuration and CPU implementation we are actually running on. */
int elf_check_arch(const struct elf32_hdr *hdr)
{
	unsigned long hsr0 = __get_HSR(0);
	unsigned long psr = __get_PSR();

	if (hdr->e_machine != EM_FRV)
		return 0;

	switch (hdr->e_flags & EF_FRV_GPR_MASK) {
	case EF_FRV_GPR64:
		/* 64 GPRs requested but only 32 enabled: reject, else
		 * fall through to accept */
		if ((hsr0 & HSR0_GRN) == HSR0_GRN_32)
			return 0;
	case EF_FRV_GPR32:
	case 0:
		break;
	default:
		return 0;
	}

	switch (hdr->e_flags & EF_FRV_FPR_MASK) {
	case EF_FRV_FPR64:
		/* likewise for the FPR bank; falls through on success */
		if ((hsr0 & HSR0_FRN) == HSR0_FRN_32)
			return 0;
	case EF_FRV_FPR32:
	case EF_FRV_FPR_NONE:
	case 0:
		break;
	default:
		return 0;
	}

	/* media multiply-add insns only exist on FR405/FR451 */
	if ((hdr->e_flags & EF_FRV_MULADD) == EF_FRV_MULADD)
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;

	switch (hdr->e_flags & EF_FRV_CPU_MASK) {
	case EF_FRV_CPU_GENERIC:
		break;
	case EF_FRV_CPU_FR300:
	case EF_FRV_CPU_SIMPLE:
	case EF_FRV_CPU_TOMCAT:
	default:
		return 0;
	case EF_FRV_CPU_FR400:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR401 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	case EF_FRV_CPU_FR450:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;
		break;
	case EF_FRV_CPU_FR500:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR501)
			return 0;
		break;
	case EF_FRV_CPU_FR550:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	}

	return 1;
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	memcpy(fpregs,
	       &current->thread.user->f,
	       sizeof(current->thread.user->f));
	return 1;
}
gpl-2.0
friedrich420/Note4-TMO-AELKernel
drivers/staging/zcache/debug.c
2147
3472
#include <linux/atomic.h>
#include "debug.h"

#ifdef CONFIG_ZCACHE_DEBUG
#include <linux/debugfs.h>

/*
 * zcache statistics counters, exported read-only via debugfs when
 * CONFIG_ZCACHE_DEBUG is enabled.  NOTE(review): the attrs[] table below
 * also names counters (eph_pageframes, pers_zbytes, last_* etc.) that are
 * not defined in this chunk -- presumably declared in "debug.h"; confirm.
 */
ssize_t zcache_obj_count;
ssize_t zcache_obj_count_max;
ssize_t zcache_objnode_count;
ssize_t zcache_objnode_count_max;
u64 zcache_eph_zbytes;
u64 zcache_eph_zbytes_max;
u64 zcache_pers_zbytes_max;
ssize_t zcache_eph_pageframes_max;
ssize_t zcache_pers_pageframes_max;
ssize_t zcache_pageframes_alloced;
ssize_t zcache_pageframes_freed;
ssize_t zcache_eph_zpages;
ssize_t zcache_eph_zpages_max;
ssize_t zcache_pers_zpages_max;
ssize_t zcache_flush_total;
ssize_t zcache_flush_found;
ssize_t zcache_flobj_total;
ssize_t zcache_flobj_found;
ssize_t zcache_failed_eph_puts;
ssize_t zcache_failed_pers_puts;
ssize_t zcache_failed_getfreepages;
ssize_t zcache_failed_alloc;
ssize_t zcache_put_to_flush;
ssize_t zcache_compress_poor;
ssize_t zcache_mean_compress_poor;
ssize_t zcache_eph_ate_tail;
ssize_t zcache_eph_ate_tail_failed;
ssize_t zcache_pers_ate_eph;
ssize_t zcache_pers_ate_eph_failed;
ssize_t zcache_evicted_eph_zpages;
ssize_t zcache_evicted_eph_pageframes;
ssize_t zcache_zero_filled_pages;
ssize_t zcache_zero_filled_pages_max;

/* Expand "x" into a { "x", &zcache_x } debugfs table entry. */
#define ATTR(x)  { .name = #x, .val = &zcache_##x, }
static struct debug_entry {
	const char *name;
	ssize_t *val;
} attrs[] = {
	ATTR(obj_count), ATTR(obj_count_max),
	ATTR(objnode_count), ATTR(objnode_count_max),
	ATTR(flush_total), ATTR(flush_found),
	ATTR(flobj_total), ATTR(flobj_found),
	ATTR(failed_eph_puts), ATTR(failed_pers_puts),
	ATTR(failed_getfreepages), ATTR(failed_alloc),
	ATTR(put_to_flush),
	ATTR(compress_poor), ATTR(mean_compress_poor),
	ATTR(eph_ate_tail), ATTR(eph_ate_tail_failed),
	ATTR(pers_ate_eph), ATTR(pers_ate_eph_failed),
	ATTR(evicted_eph_zpages), ATTR(evicted_eph_pageframes),
	ATTR(eph_pageframes), ATTR(eph_pageframes_max),
	ATTR(pers_pageframes), ATTR(pers_pageframes_max),
	ATTR(eph_zpages), ATTR(eph_zpages_max),
	ATTR(pers_zpages), ATTR(pers_zpages_max),
	ATTR(last_active_file_pageframes),
	ATTR(last_inactive_file_pageframes),
	ATTR(last_active_anon_pageframes),
	ATTR(last_inactive_anon_pageframes),
	ATTR(eph_nonactive_puts_ignored),
	ATTR(pers_nonactive_puts_ignored),
	ATTR(zero_filled_pages),
#ifdef CONFIG_ZCACHE_WRITEBACK
	ATTR(outstanding_writeback_pages),
	ATTR(writtenback_pages),
#endif
};
#undef ATTR

/* Create /sys/kernel/debug/zcache and populate it: one size_t file per
 * attrs[] entry, plus the four u64 byte counters which need the wider
 * debugfs helper. */
int zcache_debugfs_init(void)
{
	unsigned int i;
	struct dentry *root = debugfs_create_dir("zcache", NULL);
	if (root == NULL)
		return -ENXIO;

	for (i = 0; i < ARRAY_SIZE(attrs); i++)
		if (!debugfs_create_size_t(attrs[i].name, S_IRUGO,
					   root, attrs[i].val))
			goto out;

	debugfs_create_u64("eph_zbytes", S_IRUGO, root,
			   &zcache_eph_zbytes);
	debugfs_create_u64("eph_zbytes_max", S_IRUGO, root,
			   &zcache_eph_zbytes_max);
	debugfs_create_u64("pers_zbytes", S_IRUGO, root,
			   &zcache_pers_zbytes);
	debugfs_create_u64("pers_zbytes_max", S_IRUGO, root,
			   &zcache_pers_zbytes_max);

	return 0;
out:
	return -ENODEV;
}

/* developers can call this in case of ooms, e.g. to find memory leaks */
void zcache_dump(void)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(attrs); i++)
		pr_debug("zcache: %s=%zu\n", attrs[i].name, *attrs[i].val);

	pr_debug("zcache: eph_zbytes=%llu\n",
		 (unsigned long long)zcache_eph_zbytes);
	pr_debug("zcache: eph_zbytes_max=%llu\n",
		 (unsigned long long)zcache_eph_zbytes_max);
	pr_debug("zcache: pers_zbytes=%llu\n",
		 (unsigned long long)zcache_pers_zbytes);
	pr_debug("zcache: pers_zbytes_max=%llu\n",
		 (unsigned long long)zcache_pers_zbytes_max);
}
#endif
gpl-2.0
MinimalOS/android_kernel_lge_mako
arch/arm/mach-msm/board-8930-gpu.c
2403
4354
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/kgsl.h>
#include <mach/msm_bus_board.h>
#include <mach/board.h>
#include <mach/socinfo.h>

#include "devices.h"
#include "board-8930.h"

#ifdef CONFIG_MSM_BUS_SCALING
/* Bus bandwidth requests (instantaneous bandwidth, ib) for the four GPU
 * power levels; ab (average bandwidth) is left at 0 throughout. */
static struct msm_bus_vectors grp3d_init_vectors[] = {
	{
		.src = MSM_BUS_MASTER_GRAPHICS_3D,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
};

static struct msm_bus_vectors grp3d_low_vectors[] = {
	{
		.src = MSM_BUS_MASTER_GRAPHICS_3D,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = KGSL_CONVERT_TO_MBPS(1000),
	},
};

static struct msm_bus_vectors grp3d_nominal_vectors[] = {
	{
		.src = MSM_BUS_MASTER_GRAPHICS_3D,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = KGSL_CONVERT_TO_MBPS(2656),
	},
};

static struct msm_bus_vectors grp3d_max_vectors[] = {
	{
		.src = MSM_BUS_MASTER_GRAPHICS_3D,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = KGSL_CONVERT_TO_MBPS(4264),
	},
};

static struct msm_bus_paths grp3d_bus_scale_usecases[] = {
	{
		ARRAY_SIZE(grp3d_init_vectors),
		grp3d_init_vectors,
	},
	{
		ARRAY_SIZE(grp3d_low_vectors),
		grp3d_low_vectors,
	},
	{
		ARRAY_SIZE(grp3d_nominal_vectors),
		grp3d_nominal_vectors,
	},
	{
		ARRAY_SIZE(grp3d_max_vectors),
		grp3d_max_vectors,
	},
};

static struct msm_bus_scale_pdata grp3d_bus_scale_pdata = {
	grp3d_bus_scale_usecases,
	ARRAY_SIZE(grp3d_bus_scale_usecases),
	.name = "grp3d",
};
#endif

static struct resource kgsl_3d0_resources[] = {
	{
		.name = KGSL_3D0_REG_MEMORY,
		.start = 0x04300000, /* GFX3D address */
		.end = 0x0430ffff,
		.flags = IORESOURCE_MEM,
	},
	{
		.name = KGSL_3D0_SHADER_MEMORY,
		.start = 0x04310000,
		.end = 0x0431ffff,
		.flags = IORESOURCE_MEM,
	},
	{
		.name = KGSL_3D0_IRQ,
		.start = GFX3D_IRQ,
		.end = GFX3D_IRQ,
		.flags = IORESOURCE_IRQ,
	},
};

static const struct kgsl_iommu_ctx kgsl_3d0_iommu0_ctxs[] = {
	{ "gfx3d_user", 0 },
	{ "gfx3d_priv", 1 },
};

static struct kgsl_device_iommu_data kgsl_3d0_iommu_data[] = {
	{
		.iommu_ctxs = kgsl_3d0_iommu0_ctxs,
		.iommu_ctx_count = ARRAY_SIZE(kgsl_3d0_iommu0_ctxs),
		.physstart = 0x07C00000,
		.physend = 0x07C00000 + SZ_1M - 1,
	},
};

/* GPU power levels, fastest first; pwrlevel[0].gpu_freq and .chipid are
 * patched at boot in msm8930_init_gpu() for the AA/AB variants. */
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
	.pwrlevel = {
		{
			.gpu_freq = 400000000,
			.bus_freq = 3,
			.io_fraction = 0,
		},
		{
			.gpu_freq = 320000000,
			.bus_freq = 2,
			.io_fraction = 33,
		},
		{
			.gpu_freq = 192000000,
			.bus_freq = 1,
			.io_fraction = 100,
		},
		{
			.gpu_freq = 27000000,
			.bus_freq = 0,
		},
	},
	.init_level = 1,
	.num_levels = 4,
	.set_grp_async = NULL,
	.idle_timeout = HZ/12,
	.strtstp_sleepwake = false,
	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
#ifdef CONFIG_MSM_BUS_SCALING
	.bus_scale_table = &grp3d_bus_scale_pdata,
#endif
	.iommu_data = kgsl_3d0_iommu_data,
	.iommu_count = ARRAY_SIZE(kgsl_3d0_iommu_data),
};

static struct platform_device device_kgsl_3d0 = {
	.name = "kgsl-3d0",
	.id = 0,
	.num_resources = ARRAY_SIZE(kgsl_3d0_resources),
	.resource = kgsl_3d0_resources,
	.dev = {
		.platform_data = &kgsl_3d0_pdata,
	},
};

/* Probe the SoC variant, fix up the turbo frequency / bus request /
 * Adreno chip ID accordingly, then register the KGSL 3D device. */
void __init msm8930_init_gpu(void)
{
	unsigned int version = socinfo_get_version();

	/* Set the turbo speed for the AA and AB respectively */
	if (cpu_is_msm8930aa())
		kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 450000000;
	else if (cpu_is_msm8930ab()) {
		kgsl_3d0_pdata.pwrlevel[0].gpu_freq = 500000000;
#ifdef CONFIG_MSM_BUS_SCALING
		/* grp3d_max_vectors only exists with bus scaling enabled;
		 * the unguarded reference previously broke the build when
		 * CONFIG_MSM_BUS_SCALING was disabled */
		grp3d_max_vectors[0].ib = KGSL_CONVERT_TO_MBPS(4800);
#endif
	}

	/* Set up the chip ID based on the SoC version */
	if (cpu_is_msm8930ab())
		kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 0, 5, 3);
	else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
		(SOCINFO_VERSION_MINOR(version) == 2))
		kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 0, 5, 2);
	else
		kgsl_3d0_pdata.chipid = ADRENO_CHIPID(3, 0, 5, 0);

	platform_device_register(&device_kgsl_3d0);
}
gpl-2.0
ea4862/boeffla43_m440s
drivers/watchdog/bcm47xx_wdt.c
2915
6281
/*
 * Watchdog driver for Broadcom BCM47XX
 *
 * Copyright (C) 2008 Aleksandar Radovanovic <biblbroks@sezampro.rs>
 * Copyright (C) 2009 Matthieu CASTET <castet.matthieu@free.fr>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/ssb/ssb_embedded.h>
#include <asm/mach-bcm47xx/bcm47xx.h>

#define DRV_NAME		"bcm47xx_wdt"

#define WDT_DEFAULT_TIME	30	/* seconds */
#define WDT_MAX_TIME		255	/* seconds */

static int wdt_time = WDT_DEFAULT_TIME;
static int nowayout = WATCHDOG_NOWAYOUT;

module_param(wdt_time, int, 0);
MODULE_PARM_DESC(wdt_time, "Watchdog time in seconds. (default="
				__MODULE_STRING(WDT_DEFAULT_TIME) ")");

#ifdef CONFIG_WATCHDOG_NOWAYOUT
module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
		"Watchdog cannot be stopped once started (default="
				__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
#endif

static unsigned long bcm47xx_wdt_busy;	/* bit 0: /dev/watchdog is open */
static char expect_release;		/* 42 after magic 'V' close char */
static struct timer_list wdt_timer;	/* software timer petting the HW */
static atomic_t ticks;			/* seconds left before HW fires */

/* Arm the SSB hardware watchdog with its maximum count; the software
 * timer below re-arms it every second while ticks remain. */
static inline void bcm47xx_wdt_hw_start(void)
{
	/* this is 2,5s on 100Mhz clock and 2s on 133 Mhz */
	ssb_watchdog_timer_set(&ssb_bcm47xx, 0xfffffff);
}

static inline int bcm47xx_wdt_hw_stop(void)
{
	return ssb_watchdog_timer_set(&ssb_bcm47xx, 0);
}

/* One-second software tick: keep re-arming the short hardware timer
 * until the userspace timeout (ticks) expires, then let the HW fire. */
static void bcm47xx_timer_tick(unsigned long unused)
{
	if (!atomic_dec_and_test(&ticks)) {
		bcm47xx_wdt_hw_start();
		mod_timer(&wdt_timer, jiffies + HZ);
	} else {
		printk(KERN_CRIT DRV_NAME "Watchdog will fire soon!!!\n");
	}
}

/* "Pet" the watchdog: reset the countdown to the full timeout. */
static inline void bcm47xx_wdt_pet(void)
{
	atomic_set(&ticks, wdt_time);
}

static void bcm47xx_wdt_start(void)
{
	bcm47xx_wdt_pet();
	bcm47xx_timer_tick(0);
}

static void bcm47xx_wdt_pause(void)
{
	del_timer_sync(&wdt_timer);
	bcm47xx_wdt_hw_stop();
}

static void bcm47xx_wdt_stop(void)
{
	bcm47xx_wdt_pause();
}

/* Validate and store a new timeout; range is 1..WDT_MAX_TIME seconds. */
static int bcm47xx_wdt_settimeout(int new_time)
{
	if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
		return -EINVAL;

	wdt_time = new_time;
	return 0;
}

/* Opening /dev/watchdog (single-open device) starts the watchdog. */
static int bcm47xx_wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &bcm47xx_wdt_busy))
		return -EBUSY;

	bcm47xx_wdt_start();
	return nonseekable_open(inode, file);
}

/* Close only stops the watchdog if the magic 'V' character was written
 * last (expect_release == 42); otherwise keep guarding. */
static int bcm47xx_wdt_release(struct inode *inode, struct file *file)
{
	if (expect_release == 42) {
		bcm47xx_wdt_stop();
	} else {
		printk(KERN_CRIT DRV_NAME
			": Unexpected close, not stopping watchdog!\n");
		bcm47xx_wdt_start();
	}

	clear_bit(0, &bcm47xx_wdt_busy);
	expect_release = 0;
	return 0;
}

/* Any write pets the watchdog; scanning for 'V' arms the magic-close
 * handshake (unless nowayout is set). */
static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
				size_t len, loff_t *ppos)
{
	if (len) {
		if (!nowayout) {
			size_t i;

			expect_release = 0;

			for (i = 0; i != len; i++) {
				char c;
				if (get_user(c, data + i))
					return -EFAULT;
				if (c == 'V')
					expect_release = 42;
			}
		}
		bcm47xx_wdt_pet();
	}
	return len;
}

static const struct watchdog_info bcm47xx_wdt_info = {
	.identity	= DRV_NAME,
	.options	= WDIOF_SETTIMEOUT |
				WDIOF_KEEPALIVEPING |
				WDIOF_MAGICCLOSE,
};

/* Standard watchdog char-device ioctl interface (see the Linux
 * watchdog API documentation for the WDIOC_* semantics). */
static long bcm47xx_wdt_ioctl(struct file *file,
					unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_value, retval = -EINVAL;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &bcm47xx_wdt_info,
				sizeof(bcm47xx_wdt_info)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_SETOPTIONS:
		if (get_user(new_value, p))
			return -EFAULT;

		if (new_value & WDIOS_DISABLECARD) {
			bcm47xx_wdt_stop();
			retval = 0;
		}

		if (new_value & WDIOS_ENABLECARD) {
			bcm47xx_wdt_start();
			retval = 0;
		}

		return retval;

	case WDIOC_KEEPALIVE:
		bcm47xx_wdt_pet();
		return 0;

	case WDIOC_SETTIMEOUT:
		if (get_user(new_value, p))
			return -EFAULT;

		if (bcm47xx_wdt_settimeout(new_value))
			return -EINVAL;

		bcm47xx_wdt_pet();
		/* fall through - SETTIMEOUT returns the new timeout too */

	case WDIOC_GETTIMEOUT:
		return put_user(wdt_time, p);

	default:
		return -ENOTTY;
	}
}

/* Stop the watchdog on orderly shutdown/halt so it cannot fire while
 * the system is going down. */
static int bcm47xx_wdt_notify_sys(struct notifier_block *this,
	unsigned long code, void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT)
		bcm47xx_wdt_stop();
	return NOTIFY_DONE;
}

static const struct file_operations bcm47xx_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= bcm47xx_wdt_ioctl,
	.open		= bcm47xx_wdt_open,
	.release	= bcm47xx_wdt_release,
	.write		= bcm47xx_wdt_write,
};

static struct miscdevice bcm47xx_wdt_miscdev = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &bcm47xx_wdt_fops,
};

static struct notifier_block bcm47xx_wdt_notifier = {
	.notifier_call = bcm47xx_wdt_notify_sys,
};

static int __init bcm47xx_wdt_init(void)
{
	int ret;

	/* probe: if the hardware timer can't be stopped, assume absent */
	if (bcm47xx_wdt_hw_stop() < 0)
		return -ENODEV;

	setup_timer(&wdt_timer, bcm47xx_timer_tick, 0L);

	if (bcm47xx_wdt_settimeout(wdt_time)) {
		bcm47xx_wdt_settimeout(WDT_DEFAULT_TIME);
		printk(KERN_INFO DRV_NAME ": "
			"wdt_time value must be 0 < wdt_time < %d, using %d\n",
			(WDT_MAX_TIME + 1), wdt_time);
	}

	ret = register_reboot_notifier(&bcm47xx_wdt_notifier);
	if (ret)
		return ret;

	ret = misc_register(&bcm47xx_wdt_miscdev);
	if (ret) {
		unregister_reboot_notifier(&bcm47xx_wdt_notifier);
		return ret;
	}

	printk(KERN_INFO "BCM47xx Watchdog Timer enabled (%d seconds%s)\n",
				wdt_time, nowayout ? ", nowayout" : "");
	return 0;
}

static void __exit bcm47xx_wdt_exit(void)
{
	if (!nowayout)
		bcm47xx_wdt_stop();

	misc_deregister(&bcm47xx_wdt_miscdev);

	unregister_reboot_notifier(&bcm47xx_wdt_notifier);
}

module_init(bcm47xx_wdt_init);
module_exit(bcm47xx_wdt_exit);

MODULE_AUTHOR("Aleksandar Radovanovic");
MODULE_DESCRIPTION("Watchdog driver for Broadcom BCM47xx");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
CyanogenMod/android_kernel_htc_enrc2b
fs/bio-integrity.c
2915
21703
/* * bio-integrity.c - bio data integrity extensions * * Copyright (C) 2007, 2008, 2009 Oracle Corporation * Written by: Martin K. Petersen <martin.petersen@oracle.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, * USA. * */ #include <linux/blkdev.h> #include <linux/mempool.h> #include <linux/bio.h> #include <linux/workqueue.h> #include <linux/slab.h> struct integrity_slab { struct kmem_cache *slab; unsigned short nr_vecs; char name[8]; }; #define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) } struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = { IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES), }; #undef IS static struct workqueue_struct *kintegrityd_wq; static inline unsigned int vecs_to_idx(unsigned int nr) { switch (nr) { case 1: return 0; case 2 ... 4: return 1; case 5 ... 16: return 2; case 17 ... 64: return 3; case 65 ... 128: return 4; case 129 ... BIO_MAX_PAGES: return 5; default: BUG(); } } static inline int use_bip_pool(unsigned int idx) { if (idx == BIOVEC_MAX_IDX) return 1; return 0; } /** * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio * @bio: bio to attach integrity metadata to * @gfp_mask: Memory allocation mask * @nr_vecs: Number of integrity metadata scatter-gather elements * @bs: bio_set to allocate from * * Description: This function prepares a bio for attaching integrity * metadata. 
nr_vecs specifies the maximum number of pages containing * integrity metadata that can be attached. */ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio, gfp_t gfp_mask, unsigned int nr_vecs, struct bio_set *bs) { struct bio_integrity_payload *bip; unsigned int idx = vecs_to_idx(nr_vecs); BUG_ON(bio == NULL); bip = NULL; /* Lower order allocations come straight from slab */ if (!use_bip_pool(idx)) bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask); /* Use mempool if lower order alloc failed or max vecs were requested */ if (bip == NULL) { idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */ bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); if (unlikely(bip == NULL)) { printk(KERN_ERR "%s: could not alloc bip\n", __func__); return NULL; } } memset(bip, 0, sizeof(*bip)); bip->bip_slab = idx; bip->bip_bio = bio; bio->bi_integrity = bip; return bip; } EXPORT_SYMBOL(bio_integrity_alloc_bioset); /** * bio_integrity_alloc - Allocate integrity payload and attach it to bio * @bio: bio to attach integrity metadata to * @gfp_mask: Memory allocation mask * @nr_vecs: Number of integrity metadata scatter-gather elements * * Description: This function prepares a bio for attaching integrity * metadata. nr_vecs specifies the maximum number of pages containing * integrity metadata that can be attached. */ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp_mask, unsigned int nr_vecs) { return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set); } EXPORT_SYMBOL(bio_integrity_alloc); /** * bio_integrity_free - Free bio integrity payload * @bio: bio containing bip to be freed * @bs: bio_set this bio was allocated from * * Description: Used to free the integrity portion of a bio. Usually * called from bio_free(). 
*/ void bio_integrity_free(struct bio *bio, struct bio_set *bs) { struct bio_integrity_payload *bip = bio->bi_integrity; BUG_ON(bip == NULL); /* A cloned bio doesn't own the integrity metadata */ if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY) && bip->bip_buf != NULL) kfree(bip->bip_buf); if (use_bip_pool(bip->bip_slab)) mempool_free(bip, bs->bio_integrity_pool); else kmem_cache_free(bip_slab[bip->bip_slab].slab, bip); bio->bi_integrity = NULL; } EXPORT_SYMBOL(bio_integrity_free); /** * bio_integrity_add_page - Attach integrity metadata * @bio: bio to update * @page: page containing integrity metadata * @len: number of bytes of integrity metadata in page * @offset: start offset within page * * Description: Attach a page containing integrity metadata to bio. */ int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { struct bio_integrity_payload *bip = bio->bi_integrity; struct bio_vec *iv; if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) { printk(KERN_ERR "%s: bip_vec full\n", __func__); return 0; } iv = bip_vec_idx(bip, bip->bip_vcnt); BUG_ON(iv == NULL); iv->bv_page = page; iv->bv_len = len; iv->bv_offset = offset; bip->bip_vcnt++; return len; } EXPORT_SYMBOL(bio_integrity_add_page); static int bdev_integrity_enabled(struct block_device *bdev, int rw) { struct blk_integrity *bi = bdev_get_integrity(bdev); if (bi == NULL) return 0; if (rw == READ && bi->verify_fn != NULL && (bi->flags & INTEGRITY_FLAG_READ)) return 1; if (rw == WRITE && bi->generate_fn != NULL && (bi->flags & INTEGRITY_FLAG_WRITE)) return 1; return 0; } /** * bio_integrity_enabled - Check whether integrity can be passed * @bio: bio to check * * Description: Determines whether bio_integrity_prep() can be called * on this bio or not. bio data direction and target device must be * set prior to calling. The functions honors the write_generate and * read_verify flags in sysfs. 
*/ int bio_integrity_enabled(struct bio *bio) { /* Already protected? */ if (bio_integrity(bio)) return 0; return bdev_integrity_enabled(bio->bi_bdev, bio_data_dir(bio)); } EXPORT_SYMBOL(bio_integrity_enabled); /** * bio_integrity_hw_sectors - Convert 512b sectors to hardware ditto * @bi: blk_integrity profile for device * @sectors: Number of 512 sectors to convert * * Description: The block layer calculates everything in 512 byte * sectors but integrity metadata is done in terms of the hardware * sector size of the storage device. Convert the block layer sectors * to physical sectors. */ static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi, unsigned int sectors) { /* At this point there are only 512b or 4096b DIF/EPP devices */ if (bi->sector_size == 4096) return sectors >>= 3; return sectors; } /** * bio_integrity_tag_size - Retrieve integrity tag space * @bio: bio to inspect * * Description: Returns the maximum number of tag bytes that can be * attached to this bio. Filesystems can use this to determine how * much metadata to attach to an I/O. 
*/ unsigned int bio_integrity_tag_size(struct bio *bio) { struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); BUG_ON(bio->bi_size == 0); return bi->tag_size * (bio->bi_size / bi->sector_size); } EXPORT_SYMBOL(bio_integrity_tag_size); int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set) { struct bio_integrity_payload *bip = bio->bi_integrity; struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); unsigned int nr_sectors; BUG_ON(bip->bip_buf == NULL); if (bi->tag_size == 0) return -1; nr_sectors = bio_integrity_hw_sectors(bi, DIV_ROUND_UP(len, bi->tag_size)); if (nr_sectors * bi->tuple_size > bip->bip_size) { printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__, nr_sectors * bi->tuple_size, bip->bip_size); return -1; } if (set) bi->set_tag_fn(bip->bip_buf, tag_buf, nr_sectors); else bi->get_tag_fn(bip->bip_buf, tag_buf, nr_sectors); return 0; } /** * bio_integrity_set_tag - Attach a tag buffer to a bio * @bio: bio to attach buffer to * @tag_buf: Pointer to a buffer containing tag data * @len: Length of the included buffer * * Description: Use this function to tag a bio by leveraging the extra * space provided by devices formatted with integrity protection. The * size of the integrity buffer must be <= to the size reported by * bio_integrity_tag_size(). */ int bio_integrity_set_tag(struct bio *bio, void *tag_buf, unsigned int len) { BUG_ON(bio_data_dir(bio) != WRITE); return bio_integrity_tag(bio, tag_buf, len, 1); } EXPORT_SYMBOL(bio_integrity_set_tag); /** * bio_integrity_get_tag - Retrieve a tag buffer from a bio * @bio: bio to retrieve buffer from * @tag_buf: Pointer to a buffer for the tag data * @len: Length of the target buffer * * Description: Use this function to retrieve the tag buffer from a * completed I/O. The size of the integrity buffer must be <= to the * size reported by bio_integrity_tag_size(). 
*/ int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len) { BUG_ON(bio_data_dir(bio) != READ); return bio_integrity_tag(bio, tag_buf, len, 0); } EXPORT_SYMBOL(bio_integrity_get_tag); /** * bio_integrity_generate - Generate integrity metadata for a bio * @bio: bio to generate integrity metadata for * * Description: Generates integrity metadata for a bio by calling the * block device's generation callback function. The bio must have a * bip attached with enough room to accommodate the generated * integrity metadata. */ static void bio_integrity_generate(struct bio *bio) { struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity_exchg bix; struct bio_vec *bv; sector_t sector = bio->bi_sector; unsigned int i, sectors, total; void *prot_buf = bio->bi_integrity->bip_buf; total = 0; bix.disk_name = bio->bi_bdev->bd_disk->disk_name; bix.sector_size = bi->sector_size; bio_for_each_segment(bv, bio, i) { void *kaddr = kmap_atomic(bv->bv_page, KM_USER0); bix.data_buf = kaddr + bv->bv_offset; bix.data_size = bv->bv_len; bix.prot_buf = prot_buf; bix.sector = sector; bi->generate_fn(&bix); sectors = bv->bv_len / bi->sector_size; sector += sectors; prot_buf += sectors * bi->tuple_size; total += sectors * bi->tuple_size; BUG_ON(total > bio->bi_integrity->bip_size); kunmap_atomic(kaddr, KM_USER0); } } static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi) { if (bi) return bi->tuple_size; return 0; } /** * bio_integrity_prep - Prepare bio for integrity I/O * @bio: bio to prepare * * Description: Allocates a buffer for integrity metadata, maps the * pages and attaches them to a bio. The bio must have data * direction, target device and start sector set priot to calling. In * the WRITE case, integrity metadata will be generated using the * block device's integrity function. In the READ case, the buffer * will be prepared for DMA and a suitable end_io handler set up. 
*/ int bio_integrity_prep(struct bio *bio) { struct bio_integrity_payload *bip; struct blk_integrity *bi; struct request_queue *q; void *buf; unsigned long start, end; unsigned int len, nr_pages; unsigned int bytes, offset, i; unsigned int sectors; bi = bdev_get_integrity(bio->bi_bdev); q = bdev_get_queue(bio->bi_bdev); BUG_ON(bi == NULL); BUG_ON(bio_integrity(bio)); sectors = bio_integrity_hw_sectors(bi, bio_sectors(bio)); /* Allocate kernel buffer for protection data */ len = sectors * blk_integrity_tuple_size(bi); buf = kmalloc(len, GFP_NOIO | q->bounce_gfp); if (unlikely(buf == NULL)) { printk(KERN_ERR "could not allocate integrity buffer\n"); return -ENOMEM; } end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT; start = ((unsigned long) buf) >> PAGE_SHIFT; nr_pages = end - start; /* Allocate bio integrity payload and integrity vectors */ bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages); if (unlikely(bip == NULL)) { printk(KERN_ERR "could not allocate data integrity bioset\n"); kfree(buf); return -EIO; } bip->bip_buf = buf; bip->bip_size = len; bip->bip_sector = bio->bi_sector; /* Map it */ offset = offset_in_page(buf); for (i = 0 ; i < nr_pages ; i++) { int ret; bytes = PAGE_SIZE - offset; if (len <= 0) break; if (bytes > len) bytes = len; ret = bio_integrity_add_page(bio, virt_to_page(buf), bytes, offset); if (ret == 0) return 0; if (ret < bytes) break; buf += bytes; len -= bytes; offset = 0; } /* Install custom I/O completion handler if read verify is enabled */ if (bio_data_dir(bio) == READ) { bip->bip_end_io = bio->bi_end_io; bio->bi_end_io = bio_integrity_endio; } /* Auto-generate integrity metadata if this is a write */ if (bio_data_dir(bio) == WRITE) bio_integrity_generate(bio); return 0; } EXPORT_SYMBOL(bio_integrity_prep); /** * bio_integrity_verify - Verify integrity metadata for a bio * @bio: bio to verify * * Description: This function is called to verify the integrity of a * bio. 
The data in the bio io_vec is compared to the integrity * metadata returned by the HBA. */ static int bio_integrity_verify(struct bio *bio) { struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity_exchg bix; struct bio_vec *bv; sector_t sector = bio->bi_integrity->bip_sector; unsigned int i, sectors, total, ret; void *prot_buf = bio->bi_integrity->bip_buf; ret = total = 0; bix.disk_name = bio->bi_bdev->bd_disk->disk_name; bix.sector_size = bi->sector_size; bio_for_each_segment(bv, bio, i) { void *kaddr = kmap_atomic(bv->bv_page, KM_USER0); bix.data_buf = kaddr + bv->bv_offset; bix.data_size = bv->bv_len; bix.prot_buf = prot_buf; bix.sector = sector; ret = bi->verify_fn(&bix); if (ret) { kunmap_atomic(kaddr, KM_USER0); return ret; } sectors = bv->bv_len / bi->sector_size; sector += sectors; prot_buf += sectors * bi->tuple_size; total += sectors * bi->tuple_size; BUG_ON(total > bio->bi_integrity->bip_size); kunmap_atomic(kaddr, KM_USER0); } return ret; } /** * bio_integrity_verify_fn - Integrity I/O completion worker * @work: Work struct stored in bio to be verified * * Description: This workqueue function is called to complete a READ * request. The function verifies the transferred integrity metadata * and then calls the original bio end_io function. */ static void bio_integrity_verify_fn(struct work_struct *work) { struct bio_integrity_payload *bip = container_of(work, struct bio_integrity_payload, bip_work); struct bio *bio = bip->bip_bio; int error; error = bio_integrity_verify(bio); /* Restore original bio completion handler */ bio->bi_end_io = bip->bip_end_io; bio_endio(bio, error); } /** * bio_integrity_endio - Integrity I/O completion function * @bio: Protected bio * @error: Pointer to errno * * Description: Completion for integrity I/O * * Normally I/O completion is done in interrupt context. However, * verifying I/O integrity is a time-consuming task which must be run * in process context. 
This function postpones completion * accordingly. */ void bio_integrity_endio(struct bio *bio, int error) { struct bio_integrity_payload *bip = bio->bi_integrity; BUG_ON(bip->bip_bio != bio); /* In case of an I/O error there is no point in verifying the * integrity metadata. Restore original bio end_io handler * and run it. */ if (error) { bio->bi_end_io = bip->bip_end_io; bio_endio(bio, error); return; } INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); queue_work(kintegrityd_wq, &bip->bip_work); } EXPORT_SYMBOL(bio_integrity_endio); /** * bio_integrity_mark_head - Advance bip_vec skip bytes * @bip: Integrity vector to advance * @skip: Number of bytes to advance it */ void bio_integrity_mark_head(struct bio_integrity_payload *bip, unsigned int skip) { struct bio_vec *iv; unsigned int i; bip_for_each_vec(iv, bip, i) { if (skip == 0) { bip->bip_idx = i; return; } else if (skip >= iv->bv_len) { skip -= iv->bv_len; } else { /* skip < iv->bv_len) */ iv->bv_offset += skip; iv->bv_len -= skip; bip->bip_idx = i; return; } } } /** * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long * @bip: Integrity vector to truncate * @len: New length of integrity vector */ void bio_integrity_mark_tail(struct bio_integrity_payload *bip, unsigned int len) { struct bio_vec *iv; unsigned int i; bip_for_each_vec(iv, bip, i) { if (len == 0) { bip->bip_vcnt = i; return; } else if (len >= iv->bv_len) { len -= iv->bv_len; } else { /* len < iv->bv_len) */ iv->bv_len = len; len = 0; } } } /** * bio_integrity_advance - Advance integrity vector * @bio: bio whose integrity vector to update * @bytes_done: number of data bytes that have been completed * * Description: This function calculates how many integrity bytes the * number of completed data bytes correspond to and advances the * integrity vector accordingly. 
*/ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) { struct bio_integrity_payload *bip = bio->bi_integrity; struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); unsigned int nr_sectors; BUG_ON(bip == NULL); BUG_ON(bi == NULL); nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9); bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size); } EXPORT_SYMBOL(bio_integrity_advance); /** * bio_integrity_trim - Trim integrity vector * @bio: bio whose integrity vector to update * @offset: offset to first data sector * @sectors: number of data sectors * * Description: Used to trim the integrity vector in a cloned bio. * The ivec will be advanced corresponding to 'offset' data sectors * and the length will be truncated corresponding to 'len' data * sectors. */ void bio_integrity_trim(struct bio *bio, unsigned int offset, unsigned int sectors) { struct bio_integrity_payload *bip = bio->bi_integrity; struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); unsigned int nr_sectors; BUG_ON(bip == NULL); BUG_ON(bi == NULL); BUG_ON(!bio_flagged(bio, BIO_CLONED)); nr_sectors = bio_integrity_hw_sectors(bi, sectors); bip->bip_sector = bip->bip_sector + offset; bio_integrity_mark_head(bip, offset * bi->tuple_size); bio_integrity_mark_tail(bip, sectors * bi->tuple_size); } EXPORT_SYMBOL(bio_integrity_trim); /** * bio_integrity_split - Split integrity metadata * @bio: Protected bio * @bp: Resulting bio_pair * @sectors: Offset * * Description: Splits an integrity page into a bio_pair. 
*/ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors) { struct blk_integrity *bi; struct bio_integrity_payload *bip = bio->bi_integrity; unsigned int nr_sectors; if (bio_integrity(bio) == 0) return; bi = bdev_get_integrity(bio->bi_bdev); BUG_ON(bi == NULL); BUG_ON(bip->bip_vcnt != 1); nr_sectors = bio_integrity_hw_sectors(bi, sectors); bp->bio1.bi_integrity = &bp->bip1; bp->bio2.bi_integrity = &bp->bip2; bp->iv1 = bip->bip_vec[0]; bp->iv2 = bip->bip_vec[0]; bp->bip1.bip_vec[0] = bp->iv1; bp->bip2.bip_vec[0] = bp->iv2; bp->iv1.bv_len = sectors * bi->tuple_size; bp->iv2.bv_offset += sectors * bi->tuple_size; bp->iv2.bv_len -= sectors * bi->tuple_size; bp->bip1.bip_sector = bio->bi_integrity->bip_sector; bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors; bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1; bp->bip1.bip_idx = bp->bip2.bip_idx = 0; } EXPORT_SYMBOL(bio_integrity_split); /** * bio_integrity_clone - Callback for cloning bios with integrity metadata * @bio: New bio * @bio_src: Original bio * @gfp_mask: Memory allocation mask * @bs: bio_set to allocate bip from * * Description: Called to allocate a bip when cloning a bio */ int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask, struct bio_set *bs) { struct bio_integrity_payload *bip_src = bio_src->bi_integrity; struct bio_integrity_payload *bip; BUG_ON(bip_src == NULL); bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs); if (bip == NULL) return -EIO; memcpy(bip->bip_vec, bip_src->bip_vec, bip_src->bip_vcnt * sizeof(struct bio_vec)); bip->bip_sector = bip_src->bip_sector; bip->bip_vcnt = bip_src->bip_vcnt; bip->bip_idx = bip_src->bip_idx; return 0; } EXPORT_SYMBOL(bio_integrity_clone); int bioset_integrity_create(struct bio_set *bs, int pool_size) { unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES); if (bs->bio_integrity_pool) return 0; bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab); if 
(!bs->bio_integrity_pool) return -1; return 0; } EXPORT_SYMBOL(bioset_integrity_create); void bioset_integrity_free(struct bio_set *bs) { if (bs->bio_integrity_pool) mempool_destroy(bs->bio_integrity_pool); } EXPORT_SYMBOL(bioset_integrity_free); void __init bio_integrity_init(void) { unsigned int i; /* * kintegrityd won't block much but may burn a lot of CPU cycles. * Make it highpri CPU intensive wq with max concurrency of 1. */ kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); if (!kintegrityd_wq) panic("Failed to create kintegrityd\n"); for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) { unsigned int size; size = sizeof(struct bio_integrity_payload) + bip_slab[i].nr_vecs * sizeof(struct bio_vec); bip_slab[i].slab = kmem_cache_create(bip_slab[i].name, size, 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); } }
gpl-2.0
kyapa/linux-3.0.y
sound/soc/mid-x86/mfld_machine.c
2915
12541
/* * mfld_machine.c - ASoc Machine driver for Intel Medfield MID platform * * Copyright (C) 2010 Intel Corp * Author: Vinod Koul <vinod.koul@intel.com> * Author: Harsha Priya <priya.harsha@intel.com> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/io.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/jack.h> #include "../codecs/sn95031.h" #define MID_MONO 1 #define MID_STEREO 2 #define MID_MAX_CAP 5 #define MFLD_JACK_INSERT 0x04 enum soc_mic_bias_zones { MFLD_MV_START = 0, /* mic bias volutage range for Headphones*/ MFLD_MV_HP = 400, /* mic bias volutage range for American Headset*/ MFLD_MV_AM_HS = 650, /* mic bias volutage range for Headset*/ MFLD_MV_HS = 2000, MFLD_MV_UNDEFINED, }; static unsigned int hs_switch; static unsigned int lo_dac; struct mfld_mc_private { struct platform_device *socdev; void __iomem *int_base; struct snd_soc_codec *codec; u8 interrupt_status; }; struct snd_soc_jack mfld_jack; /*Headset jack detection DAPM pins */ static struct snd_soc_jack_pin mfld_jack_pins[] = { { .pin = "Headphones", .mask = SND_JACK_HEADPHONE, 
}, { .pin = "AMIC1", .mask = SND_JACK_MICROPHONE, }, }; /* jack detection voltage zones */ static struct snd_soc_jack_zone mfld_zones[] = { {MFLD_MV_START, MFLD_MV_AM_HS, SND_JACK_HEADPHONE}, {MFLD_MV_AM_HS, MFLD_MV_HS, SND_JACK_HEADSET}, }; /* sound card controls */ static const char *headset_switch_text[] = {"Earpiece", "Headset"}; static const char *lo_text[] = {"Vibra", "Headset", "IHF", "None"}; static const struct soc_enum headset_enum = SOC_ENUM_SINGLE_EXT(2, headset_switch_text); static const struct soc_enum lo_enum = SOC_ENUM_SINGLE_EXT(4, lo_text); static int headset_get_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = hs_switch; return 0; } static int headset_set_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); if (ucontrol->value.integer.value[0] == hs_switch) return 0; if (ucontrol->value.integer.value[0]) { pr_debug("hs_set HS path\n"); snd_soc_dapm_enable_pin(&codec->dapm, "Headphones"); snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT"); } else { pr_debug("hs_set EP path\n"); snd_soc_dapm_disable_pin(&codec->dapm, "Headphones"); snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT"); } snd_soc_dapm_sync(&codec->dapm); hs_switch = ucontrol->value.integer.value[0]; return 0; } static void lo_enable_out_pins(struct snd_soc_codec *codec) { snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTL"); snd_soc_dapm_enable_pin(&codec->dapm, "IHFOUTR"); snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTL"); snd_soc_dapm_enable_pin(&codec->dapm, "LINEOUTR"); snd_soc_dapm_enable_pin(&codec->dapm, "VIB1OUT"); snd_soc_dapm_enable_pin(&codec->dapm, "VIB2OUT"); if (hs_switch) { snd_soc_dapm_enable_pin(&codec->dapm, "Headphones"); snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT"); } else { snd_soc_dapm_disable_pin(&codec->dapm, "Headphones"); snd_soc_dapm_enable_pin(&codec->dapm, "EPOUT"); } } static int lo_get_switch(struct snd_kcontrol 
*kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = lo_dac; return 0; } static int lo_set_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); if (ucontrol->value.integer.value[0] == lo_dac) return 0; /* we dont want to work with last state of lineout so just enable all * pins and then disable pins not required */ lo_enable_out_pins(codec); switch (ucontrol->value.integer.value[0]) { case 0: pr_debug("set vibra path\n"); snd_soc_dapm_disable_pin(&codec->dapm, "VIB1OUT"); snd_soc_dapm_disable_pin(&codec->dapm, "VIB2OUT"); snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0); break; case 1: pr_debug("set hs path\n"); snd_soc_dapm_disable_pin(&codec->dapm, "Headphones"); snd_soc_dapm_disable_pin(&codec->dapm, "EPOUT"); snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x22); break; case 2: pr_debug("set spkr path\n"); snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTL"); snd_soc_dapm_disable_pin(&codec->dapm, "IHFOUTR"); snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x44); break; case 3: pr_debug("set null path\n"); snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTL"); snd_soc_dapm_disable_pin(&codec->dapm, "LINEOUTR"); snd_soc_update_bits(codec, SN95031_LOCTL, 0x66, 0x66); break; } snd_soc_dapm_sync(&codec->dapm); lo_dac = ucontrol->value.integer.value[0]; return 0; } static const struct snd_kcontrol_new mfld_snd_controls[] = { SOC_ENUM_EXT("Playback Switch", headset_enum, headset_get_switch, headset_set_switch), SOC_ENUM_EXT("Lineout Mux", lo_enum, lo_get_switch, lo_set_switch), }; static const struct snd_soc_dapm_widget mfld_widgets[] = { SND_SOC_DAPM_HP("Headphones", NULL), SND_SOC_DAPM_MIC("Mic", NULL), }; static const struct snd_soc_dapm_route mfld_map[] = { {"Headphones", NULL, "HPOUTR"}, {"Headphones", NULL, "HPOUTL"}, {"Mic", NULL, "AMIC1"}, }; static void mfld_jack_check(unsigned int intr_status) { struct mfld_jack_data jack_data; jack_data.mfld_jack 
= &mfld_jack; jack_data.intr_id = intr_status; sn95031_jack_detection(&jack_data); /* TODO: add american headset detection post gpiolib support */ } static int mfld_init(struct snd_soc_pcm_runtime *runtime) { struct snd_soc_codec *codec = runtime->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; int ret_val; /* Add jack sense widgets */ snd_soc_dapm_new_controls(dapm, mfld_widgets, ARRAY_SIZE(mfld_widgets)); /* Set up the map */ snd_soc_dapm_add_routes(dapm, mfld_map, ARRAY_SIZE(mfld_map)); /* always connected */ snd_soc_dapm_enable_pin(dapm, "Headphones"); snd_soc_dapm_enable_pin(dapm, "Mic"); snd_soc_dapm_sync(dapm); ret_val = snd_soc_add_controls(codec, mfld_snd_controls, ARRAY_SIZE(mfld_snd_controls)); if (ret_val) { pr_err("soc_add_controls failed %d", ret_val); return ret_val; } /* default is earpiece pin, userspace sets it explcitly */ snd_soc_dapm_disable_pin(dapm, "Headphones"); /* default is lineout NC, userspace sets it explcitly */ snd_soc_dapm_disable_pin(dapm, "LINEOUTL"); snd_soc_dapm_disable_pin(dapm, "LINEOUTR"); lo_dac = 3; hs_switch = 0; /* we dont use linein in this so set to NC */ snd_soc_dapm_disable_pin(dapm, "LINEINL"); snd_soc_dapm_disable_pin(dapm, "LINEINR"); snd_soc_dapm_sync(dapm); /* Headset and button jack detection */ ret_val = snd_soc_jack_new(codec, "Intel(R) MID Audio Jack", SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1, &mfld_jack); if (ret_val) { pr_err("jack creation failed\n"); return ret_val; } ret_val = snd_soc_jack_add_pins(&mfld_jack, ARRAY_SIZE(mfld_jack_pins), mfld_jack_pins); if (ret_val) { pr_err("adding jack pins failed\n"); return ret_val; } ret_val = snd_soc_jack_add_zones(&mfld_jack, ARRAY_SIZE(mfld_zones), mfld_zones); if (ret_val) { pr_err("adding jack zones failed\n"); return ret_val; } /* we want to check if anything is inserted at boot, * so send a fake event to codec and it will read adc * to find if anything is there or not */ mfld_jack_check(MFLD_JACK_INSERT); return ret_val; } struct 
snd_soc_dai_link mfld_msic_dailink[] = { { .name = "Medfield Headset", .stream_name = "Headset", .cpu_dai_name = "Headset-cpu-dai", .codec_dai_name = "SN95031 Headset", .codec_name = "sn95031", .platform_name = "sst-platform", .init = mfld_init, }, { .name = "Medfield Speaker", .stream_name = "Speaker", .cpu_dai_name = "Speaker-cpu-dai", .codec_dai_name = "SN95031 Speaker", .codec_name = "sn95031", .platform_name = "sst-platform", .init = NULL, }, { .name = "Medfield Vibra", .stream_name = "Vibra1", .cpu_dai_name = "Vibra1-cpu-dai", .codec_dai_name = "SN95031 Vibra1", .codec_name = "sn95031", .platform_name = "sst-platform", .init = NULL, }, { .name = "Medfield Haptics", .stream_name = "Vibra2", .cpu_dai_name = "Vibra2-cpu-dai", .codec_dai_name = "SN95031 Vibra2", .codec_name = "sn95031", .platform_name = "sst-platform", .init = NULL, }, }; /* SoC card */ static struct snd_soc_card snd_soc_card_mfld = { .name = "medfield_audio", .dai_link = mfld_msic_dailink, .num_links = ARRAY_SIZE(mfld_msic_dailink), }; static irqreturn_t snd_mfld_jack_intr_handler(int irq, void *dev) { struct mfld_mc_private *mc_private = (struct mfld_mc_private *) dev; memcpy_fromio(&mc_private->interrupt_status, ((void *)(mc_private->int_base)), sizeof(u8)); return IRQ_WAKE_THREAD; } static irqreturn_t snd_mfld_jack_detection(int irq, void *data) { struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data; if (mfld_jack.codec == NULL) return IRQ_HANDLED; mfld_jack_check(mc_drv_ctx->interrupt_status); return IRQ_HANDLED; } static int __devinit snd_mfld_mc_probe(struct platform_device *pdev) { int ret_val = 0, irq; struct mfld_mc_private *mc_drv_ctx; struct resource *irq_mem; pr_debug("snd_mfld_mc_probe called\n"); /* retrive the irq number */ irq = platform_get_irq(pdev, 0); /* audio interrupt base of SRAM location where * interrupts are stored by System FW */ mc_drv_ctx = kzalloc(sizeof(*mc_drv_ctx), GFP_ATOMIC); if (!mc_drv_ctx) { pr_err("allocation failed\n"); return -ENOMEM; } 
irq_mem = platform_get_resource_byname( pdev, IORESOURCE_MEM, "IRQ_BASE"); if (!irq_mem) { pr_err("no mem resource given\n"); ret_val = -ENODEV; goto unalloc; } mc_drv_ctx->int_base = ioremap_nocache(irq_mem->start, resource_size(irq_mem)); if (!mc_drv_ctx->int_base) { pr_err("Mapping of cache failed\n"); ret_val = -ENOMEM; goto unalloc; } /* register for interrupt */ ret_val = request_threaded_irq(irq, snd_mfld_jack_intr_handler, snd_mfld_jack_detection, IRQF_SHARED, pdev->dev.driver->name, mc_drv_ctx); if (ret_val) { pr_err("cannot register IRQ\n"); goto unalloc; } /* register the soc card */ snd_soc_card_mfld.dev = &pdev->dev; ret_val = snd_soc_register_card(&snd_soc_card_mfld); if (ret_val) { pr_debug("snd_soc_register_card failed %d\n", ret_val); goto freeirq; } platform_set_drvdata(pdev, mc_drv_ctx); pr_debug("successfully exited probe\n"); return ret_val; freeirq: free_irq(irq, mc_drv_ctx); unalloc: kfree(mc_drv_ctx); return ret_val; } static int __devexit snd_mfld_mc_remove(struct platform_device *pdev) { struct mfld_mc_private *mc_drv_ctx = platform_get_drvdata(pdev); pr_debug("snd_mfld_mc_remove called\n"); free_irq(platform_get_irq(pdev, 0), mc_drv_ctx); snd_soc_unregister_card(&snd_soc_card_mfld); kfree(mc_drv_ctx); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver snd_mfld_mc_driver = { .driver = { .owner = THIS_MODULE, .name = "msic_audio", }, .probe = snd_mfld_mc_probe, .remove = __devexit_p(snd_mfld_mc_remove), }; static int __init snd_mfld_driver_init(void) { pr_debug("snd_mfld_driver_init called\n"); return platform_driver_register(&snd_mfld_mc_driver); } module_init(snd_mfld_driver_init); static void __exit snd_mfld_driver_exit(void) { pr_debug("snd_mfld_driver_exit called\n"); platform_driver_unregister(&snd_mfld_mc_driver); } module_exit(snd_mfld_driver_exit); MODULE_DESCRIPTION("ASoC Intel(R) MID Machine driver"); MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); MODULE_AUTHOR("Harsha Priya 
<priya.harsha@intel.com>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:msic-audio");
gpl-2.0
sbdags/tf701t_kernel_source
arch/arm/mach-omap2/pm44xx.c
4707
6099
/*
 * OMAP4 Power Management Routines
 *
 * Copyright (C) 2010-2011 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/system_misc.h>

#include "common.h"
#include "clockdomain.h"
#include "powerdomain.h"
#include "pm.h"

/*
 * Per-powerdomain bookkeeping: the target low-power state, plus the
 * states saved across a system suspend so they can be restored.
 */
struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;			/* target state to enter on suspend */
#ifdef CONFIG_SUSPEND
	u32 saved_state;		/* next-pwrst saved before suspend */
	u32 saved_logic_state;		/* logic retention state saved before suspend */
#endif
	struct list_head node;
};

/* List of power_state entries built by pwrdms_setup() at init time. */
static LIST_HEAD(pwrst_list);

#ifdef CONFIG_SUSPEND
/*
 * omap4_pm_suspend - system suspend entry point for OMAP4.
 *
 * Saves every tracked powerdomain's programmed state, programs the
 * suspend targets, enters low power on the master CPU, then restores
 * the saved states and reports (by log only) any domain that missed
 * its target.
 */
static int omap4_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;
	u32 cpu_id = smp_processor_id();

	/* Save current powerdomain state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
		pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
	}

	/* Set targeted power domain states by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
		pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF);
	}

	/*
	 * For MPUSS to hit power domain retention(CSWR or OSWR),
	 * CPU0 and CPU1 power domains need to be in OFF or DORMANT state,
	 * since CPU power domain CSWR is not supported by hardware
	 * Only master CPU follows suspend path. All other CPUs follow
	 * CPU hotplug path in system wide suspend. On OMAP4, CPU power
	 * domain CSWR is not supported by hardware.
	 * More details can be found in OMAP4430 TRM section 4.3.4.2.
	 */
	omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);

	/* Restore next powerdomain state */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter "
				"target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
		pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
	}
	if (ret)
		pr_crit("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	/* NOTE(review): failures are logged only; 0 is returned regardless. */
	return 0;
}
#endif /* CONFIG_SUSPEND */

/*
 * pwrdms_setup - per-powerdomain init callback for pwrdm_for_each().
 *
 * Adds the domain to pwrst_list and programs its suspend target.
 * Currently restricted to "mpu_pwrdm" only (see FIXME below).
 */
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	/*
	 * Skip CPU0 and CPU1 power domains. CPU1 is programmed
	 * through hotplug path and CPU0 explicitly programmed
	 * further down in the code path
	 */
	if (!strncmp(pwrdm->name, "cpu", 3))
		return 0;

	/*
	 * FIXME: Remove this check when core retention is supported
	 * Only MPUSS power domain is added in the list.
	 */
	if (strcmp(pwrdm->name, "mpu_pwrdm"))
		return 0;

	/* GFP_ATOMIC: presumably because this can run early/atomically —
	 * TODO confirm against pwrdm_for_each() calling context. */
	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;

	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/**
 * omap_default_idle - OMAP4 default idle routine.
 *
 * Implements OMAP4 memory, IO ordering requirements which can't be addressed
 * with default cpu_do_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and
 * by secondary CPU with CONFIG_CPUIDLE.
 */
static void omap_default_idle(void)
{
	local_fiq_disable();
	omap_do_wfi();
	local_fiq_enable();
}

/**
 * omap4_pm_init - Init routine for OMAP4 PM
 *
 * Initializes all powerdomain and clockdomain target states
 * and all PRCM settings.
*/ static int __init omap4_pm_init(void) { int ret; struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup; struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm; if (!cpu_is_omap44xx()) return -ENODEV; if (omap_rev() == OMAP4430_REV_ES1_0) { WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); return -ENODEV; } pr_err("Power Management for TI OMAP4.\n"); ret = pwrdm_for_each(pwrdms_setup, NULL); if (ret) { pr_err("Failed to setup powerdomains\n"); goto err2; } /* * The dynamic dependency between MPUSS -> MEMIF and * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as * expected. The hardware recommendation is to enable static * dependencies for these to avoid system lock ups or random crashes. * The L4 wakeup depedency is added to workaround the OCP sync hardware * BUG with 32K synctimer which lead to incorrect timer value read * from the 32K counter. The BUG applies for GPTIMER1 and WDT2 which * are part of L4 wakeup clockdomain. */ mpuss_clkdm = clkdm_lookup("mpuss_clkdm"); emif_clkdm = clkdm_lookup("l3_emif_clkdm"); l3_1_clkdm = clkdm_lookup("l3_1_clkdm"); l3_2_clkdm = clkdm_lookup("l3_2_clkdm"); l4_per_clkdm = clkdm_lookup("l4_per_clkdm"); l4wkup = clkdm_lookup("l4_wkup_clkdm"); ducati_clkdm = clkdm_lookup("ducati_clkdm"); if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) || (!l4wkup) || (!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm)) goto err2; ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm); ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm); ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm); ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm); ret |= clkdm_add_wkdep(mpuss_clkdm, l4wkup); ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm); ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm); if (ret) { pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 " "wakeup dependency\n"); goto err2; } ret = omap4_mpuss_init(); if (ret) { pr_err("Failed to initialise OMAP4 MPUSS\n"); goto err2; } (void) 
clkdm_for_each(omap_pm_clkdms_setup, NULL); #ifdef CONFIG_SUSPEND omap_pm_suspend = omap4_pm_suspend; #endif /* Overwrite the default cpu_do_idle() */ arm_pm_idle = omap_default_idle; omap4_idle_init(); err2: return ret; } late_initcall(omap4_pm_init);
gpl-2.0
kaber/nf-next-ipv6-nat
sound/pci/lx6464es/lx_core.c
5219
33329
/* -*- linux-c -*- *
 *
 * ALSA driver for the digigram lx6464es interface
 * low-level interface
 *
 * Copyright (c) 2009 Tim Blechmann <tim@klingt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

/* #define RMH_DEBUG 1 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include "lx6464es.h"
#include "lx_core.h"

/* low-level register access */

/* Per-port offsets (in 32-bit words) of the DSP registers in the DSP BAR. */
static const unsigned long dsp_port_offsets[] = {
	0,
	0x400,
	0x401,
	0x402,
	0x403,
	0x404,
	0x405,
	0x406,
	0x407,
	0x408,
	0x409,
	0x40a,
	0x40b,
	0x40c,

	0x410,
	0x411,
	0x412,
	0x413,
	0x414,
	0x415,
	0x416,

	0x420,
	0x430,
	0x431,
	0x432,
	0x433,
	0x434,
	0x440
};

/* Translate a DSP port index into its mapped MMIO address. */
static void __iomem *lx_dsp_register(struct lx6464es *chip, int port)
{
	void __iomem *base_address = chip->port_dsp_bar;
	return base_address + dsp_port_offsets[port]*4;
}

/* Read one 32-bit DSP register. */
unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
{
	void __iomem *address = lx_dsp_register(chip, port);
	return ioread32(address);
}

/* Read @len consecutive 32-bit DSP registers into @data. */
static void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data,
			       u32 len)
{
	u32 __iomem *address = lx_dsp_register(chip, port);
	int i;

	/* we cannot use memcpy_fromio */
	for (i = 0; i != len; ++i)
		data[i] = ioread32(address + i);
}

/* Write one 32-bit DSP register. */
void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
{
	void __iomem *address = lx_dsp_register(chip, port);
	iowrite32(data, address);
}

/* Write @len consecutive 32-bit DSP registers from @data. */
static void lx_dsp_reg_writebuf(struct lx6464es *chip, int port,
				const u32 *data, u32 len)
{
	u32 __iomem *address = lx_dsp_register(chip, port);
	int i;

	/* we cannot use memcpy_to */
	for (i = 0; i != len; ++i)
		iowrite32(data[i], address + i);
}

/* Byte offsets of the PLX (PCI bridge) registers within the PLX BAR. */
static const unsigned long plx_port_offsets[] = {
	0x04,
	0x40,
	0x44,
	0x48,
	0x4c,
	0x50,
	0x54,
	0x58,
	0x5c,
	0x64,
	0x68,
	0x6C
};

/* Translate a PLX port index into its mapped MMIO address. */
static void __iomem *lx_plx_register(struct lx6464es *chip, int port)
{
	void __iomem *base_address = chip->port_plx_remapped;
	return base_address + plx_port_offsets[port];
}

/* Read one 32-bit PLX register. */
unsigned long lx_plx_reg_read(struct lx6464es *chip, int port)
{
	void __iomem *address = lx_plx_register(chip, port);
	return ioread32(address);
}

/* Write one 32-bit PLX register. */
void lx_plx_reg_write(struct lx6464es *chip, int port, u32 data)
{
	void __iomem *address = lx_plx_register(chip, port);
	iowrite32(data, address);
}

/*
 * Read PLX mailbox @mbox_nr (valid: 1..7). Mailbox 0 is reserved for
 * HF flags: requesting it trips snd_BUG() and deliberately falls
 * through to the 0xdeadbeef poison value, as does any out-of-range
 * number.
 */
u32 lx_plx_mbox_read(struct lx6464es *chip, int mbox_nr)
{
	int index;

	switch (mbox_nr) {
	case 1:
		index = ePLX_MBOX1;   break;
	case 2:
		index = ePLX_MBOX2;   break;
	case 3:
		index = ePLX_MBOX3;   break;
	case 4:
		index = ePLX_MBOX4;   break;
	case 5:
		index = ePLX_MBOX5;   break;
	case 6:
		index = ePLX_MBOX6;   break;
	case 7:
		index = ePLX_MBOX7;   break;
	case 0:			/* reserved for HF flags */
		snd_BUG();
		/* fallthrough */
	default:
		return 0xdeadbeef;
	}

	return lx_plx_reg_read(chip, index);
}

/*
 * Write PLX mailbox @mbox_nr. Mailboxes 0 and 2 are reserved (HF flags
 * and the DSP's pipe-state image respectively) and yield -EBADRQC.
 */
int lx_plx_mbox_write(struct lx6464es *chip, int mbox_nr, u32 value)
{
	int index = -1;

	switch (mbox_nr) {
	case 1:
		index = ePLX_MBOX1;   break;
	case 3:
		index = ePLX_MBOX3;   break;
	case 4:
		index = ePLX_MBOX4;   break;
	case 5:
		index = ePLX_MBOX5;   break;
	case 6:
		index = ePLX_MBOX6;   break;
	case 7:
		index = ePLX_MBOX7;   break;

	case 0:			/* reserved for HF flags */
	case 2:			/* reserved for Pipe States
				 * the DSP keeps an image of it */
		snd_BUG();
		return -EBADRQC;
	}

	lx_plx_reg_write(chip, index, value);
	return 0;
}


/* rmh */

#ifdef CONFIG_SND_DEBUG
#define CMD_NAME(a) a
#else
#define CMD_NAME(a) NULL
#endif

#define Reg_CSM_MR	0x00000002
#define Reg_CSM_MC	0x00000001

/* Per-opcode descriptor for the DSP command table below. */
struct dsp_cmd_info {
	u32    dcCodeOp;	/* Op
Code of the command (usually 1st 24-bits * word).*/ u16 dcCmdLength; /* Command length in words of 24 bits.*/ u16 dcStatusType; /* Status type: 0 for fixed length, 1 for * random. */ u16 dcStatusLength; /* Status length (if fixed).*/ char *dcOpName; }; /* Initialization and control data for the Microblaze interface - OpCode: the opcode field of the command set at the proper offset - CmdLength the number of command words - StatusType offset in the status registers: 0 means that the return value may be different from 0, and must be read - StatusLength the number of status words (in addition to the return value) */ static struct dsp_cmd_info dsp_commands[] = { { (CMD_00_INFO_DEBUG << OPCODE_OFFSET) , 1 /*custom*/ , 1 , 0 /**/ , CMD_NAME("INFO_DEBUG") }, { (CMD_01_GET_SYS_CFG << OPCODE_OFFSET) , 1 /**/ , 1 , 2 /**/ , CMD_NAME("GET_SYS_CFG") }, { (CMD_02_SET_GRANULARITY << OPCODE_OFFSET) , 1 /**/ , 1 , 0 /**/ , CMD_NAME("SET_GRANULARITY") }, { (CMD_03_SET_TIMER_IRQ << OPCODE_OFFSET) , 1 /**/ , 1 , 0 /**/ , CMD_NAME("SET_TIMER_IRQ") }, { (CMD_04_GET_EVENT << OPCODE_OFFSET) , 1 /**/ , 1 , 0 /*up to 10*/ , CMD_NAME("GET_EVENT") }, { (CMD_05_GET_PIPES << OPCODE_OFFSET) , 1 /**/ , 1 , 2 /*up to 4*/ , CMD_NAME("GET_PIPES") }, { (CMD_06_ALLOCATE_PIPE << OPCODE_OFFSET) , 1 /**/ , 0 , 0 /**/ , CMD_NAME("ALLOCATE_PIPE") }, { (CMD_07_RELEASE_PIPE << OPCODE_OFFSET) , 1 /**/ , 0 , 0 /**/ , CMD_NAME("RELEASE_PIPE") }, { (CMD_08_ASK_BUFFERS << OPCODE_OFFSET) , 1 /**/ , 1 , MAX_STREAM_BUFFER , CMD_NAME("ASK_BUFFERS") }, { (CMD_09_STOP_PIPE << OPCODE_OFFSET) , 1 /**/ , 0 , 0 /*up to 2*/ , CMD_NAME("STOP_PIPE") }, { (CMD_0A_GET_PIPE_SPL_COUNT << OPCODE_OFFSET) , 1 /**/ , 1 , 1 /*up to 2*/ , CMD_NAME("GET_PIPE_SPL_COUNT") }, { (CMD_0B_TOGGLE_PIPE_STATE << OPCODE_OFFSET) , 1 /*up to 5*/ , 1 , 0 /**/ , CMD_NAME("TOGGLE_PIPE_STATE") }, { (CMD_0C_DEF_STREAM << OPCODE_OFFSET) , 1 /*up to 4*/ , 1 , 0 /**/ , CMD_NAME("DEF_STREAM") }, { (CMD_0D_SET_MUTE << OPCODE_OFFSET) , 3 /**/ , 1 , 0 /**/ , 
CMD_NAME("SET_MUTE") }, { (CMD_0E_GET_STREAM_SPL_COUNT << OPCODE_OFFSET) , 1/**/ , 1 , 2 /**/ , CMD_NAME("GET_STREAM_SPL_COUNT") }, { (CMD_0F_UPDATE_BUFFER << OPCODE_OFFSET) , 3 /*up to 4*/ , 0 , 1 /**/ , CMD_NAME("UPDATE_BUFFER") }, { (CMD_10_GET_BUFFER << OPCODE_OFFSET) , 1 /**/ , 1 , 4 /**/ , CMD_NAME("GET_BUFFER") }, { (CMD_11_CANCEL_BUFFER << OPCODE_OFFSET) , 1 /**/ , 1 , 1 /*up to 4*/ , CMD_NAME("CANCEL_BUFFER") }, { (CMD_12_GET_PEAK << OPCODE_OFFSET) , 1 /**/ , 1 , 1 /**/ , CMD_NAME("GET_PEAK") }, { (CMD_13_SET_STREAM_STATE << OPCODE_OFFSET) , 1 /**/ , 1 , 0 /**/ , CMD_NAME("SET_STREAM_STATE") }, }; static void lx_message_init(struct lx_rmh *rmh, enum cmd_mb_opcodes cmd) { snd_BUG_ON(cmd >= CMD_14_INVALID); rmh->cmd[0] = dsp_commands[cmd].dcCodeOp; rmh->cmd_len = dsp_commands[cmd].dcCmdLength; rmh->stat_len = dsp_commands[cmd].dcStatusLength; rmh->dsp_stat = dsp_commands[cmd].dcStatusType; rmh->cmd_idx = cmd; memset(&rmh->cmd[1], 0, (REG_CRM_NUMBER - 1) * sizeof(u32)); #ifdef CONFIG_SND_DEBUG memset(rmh->stat, 0, REG_CRM_NUMBER * sizeof(u32)); #endif #ifdef RMH_DEBUG rmh->cmd_idx = cmd; #endif } #ifdef RMH_DEBUG #define LXRMH "lx6464es rmh: " static void lx_message_dump(struct lx_rmh *rmh) { u8 idx = rmh->cmd_idx; int i; snd_printk(LXRMH "command %s\n", dsp_commands[idx].dcOpName); for (i = 0; i != rmh->cmd_len; ++i) snd_printk(LXRMH "\tcmd[%d] %08x\n", i, rmh->cmd[i]); for (i = 0; i != rmh->stat_len; ++i) snd_printk(LXRMH "\tstat[%d]: %08x\n", i, rmh->stat[i]); snd_printk("\n"); } #else static inline void lx_message_dump(struct lx_rmh *rmh) {} #endif /* sleep 500 - 100 = 400 times 100us -> the timeout is >= 40 ms */ #define XILINX_TIMEOUT_MS 40 #define XILINX_POLL_NO_SLEEP 100 #define XILINX_POLL_ITERATIONS 150 static int lx_message_send_atomic(struct lx6464es *chip, struct lx_rmh *rmh) { u32 reg = ED_DSP_TIMED_OUT; int dwloop; if (lx_dsp_reg_read(chip, eReg_CSM) & (Reg_CSM_MC | Reg_CSM_MR)) { snd_printk(KERN_ERR LXP "PIOSendMessage eReg_CSM %x\n", reg); 
return -EBUSY; } /* write command */ lx_dsp_reg_writebuf(chip, eReg_CRM1, rmh->cmd, rmh->cmd_len); /* MicoBlaze gogogo */ lx_dsp_reg_write(chip, eReg_CSM, Reg_CSM_MC); /* wait for device to answer */ for (dwloop = 0; dwloop != XILINX_TIMEOUT_MS * 1000; ++dwloop) { if (lx_dsp_reg_read(chip, eReg_CSM) & Reg_CSM_MR) { if (rmh->dsp_stat == 0) reg = lx_dsp_reg_read(chip, eReg_CRM1); else reg = 0; goto polling_successful; } else udelay(1); } snd_printk(KERN_WARNING LXP "TIMEOUT lx_message_send_atomic! " "polling failed\n"); polling_successful: if ((reg & ERROR_VALUE) == 0) { /* read response */ if (rmh->stat_len) { snd_BUG_ON(rmh->stat_len >= (REG_CRM_NUMBER-1)); lx_dsp_reg_readbuf(chip, eReg_CRM2, rmh->stat, rmh->stat_len); } } else snd_printk(LXP "rmh error: %08x\n", reg); /* clear Reg_CSM_MR */ lx_dsp_reg_write(chip, eReg_CSM, 0); switch (reg) { case ED_DSP_TIMED_OUT: snd_printk(KERN_WARNING LXP "lx_message_send: dsp timeout\n"); return -ETIMEDOUT; case ED_DSP_CRASHED: snd_printk(KERN_WARNING LXP "lx_message_send: dsp crashed\n"); return -EAGAIN; } lx_message_dump(rmh); return reg; } /* low-level dsp access */ int __devinit lx_dsp_get_version(struct lx6464es *chip, u32 *rdsp_version) { u16 ret; unsigned long flags; spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG); ret = lx_message_send_atomic(chip, &chip->rmh); *rdsp_version = chip->rmh.stat[1]; spin_unlock_irqrestore(&chip->msg_lock, flags); return ret; } int lx_dsp_get_clock_frequency(struct lx6464es *chip, u32 *rfreq) { u16 ret = 0; unsigned long flags; u32 freq_raw = 0; u32 freq = 0; u32 frequency = 0; spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_01_GET_SYS_CFG); ret = lx_message_send_atomic(chip, &chip->rmh); if (ret == 0) { freq_raw = chip->rmh.stat[0] >> FREQ_FIELD_OFFSET; freq = freq_raw & XES_FREQ_COUNT8_MASK; if ((freq < XES_FREQ_COUNT8_48_MAX) || (freq > XES_FREQ_COUNT8_44_MIN)) frequency = 0; /* unknown */ else if (freq >= 
XES_FREQ_COUNT8_44_MAX) frequency = 44100; else frequency = 48000; } spin_unlock_irqrestore(&chip->msg_lock, flags); *rfreq = frequency * chip->freq_ratio; return ret; } int lx_dsp_get_mac(struct lx6464es *chip) { u32 macmsb, maclsb; macmsb = lx_dsp_reg_read(chip, eReg_ADMACESMSB) & 0x00FFFFFF; maclsb = lx_dsp_reg_read(chip, eReg_ADMACESLSB) & 0x00FFFFFF; /* todo: endianess handling */ chip->mac_address[5] = ((u8 *)(&maclsb))[0]; chip->mac_address[4] = ((u8 *)(&maclsb))[1]; chip->mac_address[3] = ((u8 *)(&maclsb))[2]; chip->mac_address[2] = ((u8 *)(&macmsb))[0]; chip->mac_address[1] = ((u8 *)(&macmsb))[1]; chip->mac_address[0] = ((u8 *)(&macmsb))[2]; return 0; } int lx_dsp_set_granularity(struct lx6464es *chip, u32 gran) { unsigned long flags; int ret; spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_02_SET_GRANULARITY); chip->rmh.cmd[0] |= gran; ret = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); return ret; } int lx_dsp_read_async_events(struct lx6464es *chip, u32 *data) { unsigned long flags; int ret; spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_04_GET_EVENT); chip->rmh.stat_len = 9; /* we don't necessarily need the full length */ ret = lx_message_send_atomic(chip, &chip->rmh); if (!ret) memcpy(data, chip->rmh.stat, chip->rmh.stat_len * sizeof(u32)); spin_unlock_irqrestore(&chip->msg_lock, flags); return ret; } #define CSES_TIMEOUT 100 /* microseconds */ #define CSES_CE 0x0001 #define CSES_BROADCAST 0x0002 #define CSES_UPDATE_LDSV 0x0004 int lx_dsp_es_check_pipeline(struct lx6464es *chip) { int i; for (i = 0; i != CSES_TIMEOUT; ++i) { /* * le bit CSES_UPDATE_LDSV est à 1 dés que le macprog * est pret. il re-passe à 0 lorsque le premier read a * été fait. pour l'instant on retire le test car ce bit * passe a 1 environ 200 à 400 ms aprés que le registre * confES à été écrit (kick du xilinx ES). * * On ne teste que le bit CE. 
* */ u32 cses = lx_dsp_reg_read(chip, eReg_CSES); if ((cses & CSES_CE) == 0) return 0; udelay(1); } return -ETIMEDOUT; } #define PIPE_INFO_TO_CMD(capture, pipe) \ ((u32)((u32)(pipe) | ((capture) ? ID_IS_CAPTURE : 0L)) << ID_OFFSET) /* low-level pipe handling */ int lx_pipe_allocate(struct lx6464es *chip, u32 pipe, int is_capture, int channels) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_06_ALLOCATE_PIPE); chip->rmh.cmd[0] |= pipe_cmd; chip->rmh.cmd[0] |= channels; err = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); if (err != 0) snd_printk(KERN_ERR "lx6464es: could not allocate pipe\n"); return err; } int lx_pipe_release(struct lx6464es *chip, u32 pipe, int is_capture) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_07_RELEASE_PIPE); chip->rmh.cmd[0] |= pipe_cmd; err = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int lx_buffer_ask(struct lx6464es *chip, u32 pipe, int is_capture, u32 *r_needed, u32 *r_freed, u32 *size_array) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); #ifdef CONFIG_SND_DEBUG if (size_array) memset(size_array, 0, sizeof(u32)*MAX_STREAM_BUFFER); #endif *r_needed = 0; *r_freed = 0; spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_08_ASK_BUFFERS); chip->rmh.cmd[0] |= pipe_cmd; err = lx_message_send_atomic(chip, &chip->rmh); if (!err) { int i; for (i = 0; i < MAX_STREAM_BUFFER; ++i) { u32 stat = chip->rmh.stat[i]; if (stat & (BF_EOB << BUFF_FLAGS_OFFSET)) { /* finished */ *r_freed += 1; if (size_array) size_array[i] = stat & MASK_DATA_SIZE; } else if ((stat & (BF_VALID << BUFF_FLAGS_OFFSET)) == 0) /* free */ *r_needed += 1; } #if 0 snd_printdd(LXP "CMD_08_ASK_BUFFERS: 
needed %d, freed %d\n", *r_needed, *r_freed); for (i = 0; i < MAX_STREAM_BUFFER; ++i) { for (i = 0; i != chip->rmh.stat_len; ++i) snd_printdd(" stat[%d]: %x, %x\n", i, chip->rmh.stat[i], chip->rmh.stat[i] & MASK_DATA_SIZE); } #endif } spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int lx_pipe_stop(struct lx6464es *chip, u32 pipe, int is_capture) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_09_STOP_PIPE); chip->rmh.cmd[0] |= pipe_cmd; err = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } static int lx_pipe_toggle_state(struct lx6464es *chip, u32 pipe, int is_capture) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_0B_TOGGLE_PIPE_STATE); chip->rmh.cmd[0] |= pipe_cmd; err = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int lx_pipe_start(struct lx6464es *chip, u32 pipe, int is_capture) { int err; err = lx_pipe_wait_for_idle(chip, pipe, is_capture); if (err < 0) return err; err = lx_pipe_toggle_state(chip, pipe, is_capture); return err; } int lx_pipe_pause(struct lx6464es *chip, u32 pipe, int is_capture) { int err = 0; err = lx_pipe_wait_for_start(chip, pipe, is_capture); if (err < 0) return err; err = lx_pipe_toggle_state(chip, pipe, is_capture); return err; } int lx_pipe_sample_count(struct lx6464es *chip, u32 pipe, int is_capture, u64 *rsample_count) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT); chip->rmh.cmd[0] |= pipe_cmd; chip->rmh.stat_len = 2; /* need all words here! */ err = lx_message_send_atomic(chip, &chip->rmh); /* don't sleep! 
*/ if (err != 0) snd_printk(KERN_ERR "lx6464es: could not query pipe's sample count\n"); else { *rsample_count = ((u64)(chip->rmh.stat[0] & MASK_SPL_COUNT_HI) << 24) /* hi part */ + chip->rmh.stat[1]; /* lo part */ } spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int lx_pipe_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 *rstate) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_0A_GET_PIPE_SPL_COUNT); chip->rmh.cmd[0] |= pipe_cmd; err = lx_message_send_atomic(chip, &chip->rmh); if (err != 0) snd_printk(KERN_ERR "lx6464es: could not query pipe's state\n"); else *rstate = (chip->rmh.stat[0] >> PSTATE_OFFSET) & 0x0F; spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } static int lx_pipe_wait_for_state(struct lx6464es *chip, u32 pipe, int is_capture, u16 state) { int i; /* max 2*PCMOnlyGranularity = 2*1024 at 44100 = < 50 ms: * timeout 50 ms */ for (i = 0; i != 50; ++i) { u16 current_state; int err = lx_pipe_state(chip, pipe, is_capture, &current_state); if (err < 0) return err; if (current_state == state) return 0; mdelay(1); } return -ETIMEDOUT; } int lx_pipe_wait_for_start(struct lx6464es *chip, u32 pipe, int is_capture) { return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_RUN); } int lx_pipe_wait_for_idle(struct lx6464es *chip, u32 pipe, int is_capture) { return lx_pipe_wait_for_state(chip, pipe, is_capture, PSTATE_IDLE); } /* low-level stream handling */ int lx_stream_set_state(struct lx6464es *chip, u32 pipe, int is_capture, enum stream_state_t state) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_13_SET_STREAM_STATE); chip->rmh.cmd[0] |= pipe_cmd; chip->rmh.cmd[0] |= state; err = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int 
lx_stream_set_format(struct lx6464es *chip, struct snd_pcm_runtime *runtime, u32 pipe, int is_capture) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); u32 channels = runtime->channels; if (runtime->channels != channels) snd_printk(KERN_ERR LXP "channel count mismatch: %d vs %d", runtime->channels, channels); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_0C_DEF_STREAM); chip->rmh.cmd[0] |= pipe_cmd; if (runtime->sample_bits == 16) /* 16 bit format */ chip->rmh.cmd[0] |= (STREAM_FMT_16b << STREAM_FMT_OFFSET); if (snd_pcm_format_little_endian(runtime->format)) /* little endian/intel format */ chip->rmh.cmd[0] |= (STREAM_FMT_intel << STREAM_FMT_OFFSET); chip->rmh.cmd[0] |= channels-1; err = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int lx_stream_state(struct lx6464es *chip, u32 pipe, int is_capture, int *rstate) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT); chip->rmh.cmd[0] |= pipe_cmd; err = lx_message_send_atomic(chip, &chip->rmh); *rstate = (chip->rmh.stat[0] & SF_START) ? 
START_STATE : PAUSE_STATE; spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int lx_stream_sample_position(struct lx6464es *chip, u32 pipe, int is_capture, u64 *r_bytepos) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_0E_GET_STREAM_SPL_COUNT); chip->rmh.cmd[0] |= pipe_cmd; err = lx_message_send_atomic(chip, &chip->rmh); *r_bytepos = ((u64) (chip->rmh.stat[0] & MASK_SPL_COUNT_HI) << 32) /* hi part */ + chip->rmh.stat[1]; /* lo part */ spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } /* low-level buffer handling */ int lx_buffer_give(struct lx6464es *chip, u32 pipe, int is_capture, u32 buffer_size, u32 buf_address_lo, u32 buf_address_hi, u32 *r_buffer_index) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_0F_UPDATE_BUFFER); chip->rmh.cmd[0] |= pipe_cmd; chip->rmh.cmd[0] |= BF_NOTIFY_EOB; /* request interrupt notification */ /* todo: pause request, circular buffer */ chip->rmh.cmd[1] = buffer_size & MASK_DATA_SIZE; chip->rmh.cmd[2] = buf_address_lo; if (buf_address_hi) { chip->rmh.cmd_len = 4; chip->rmh.cmd[3] = buf_address_hi; chip->rmh.cmd[0] |= BF_64BITS_ADR; } err = lx_message_send_atomic(chip, &chip->rmh); if (err == 0) { *r_buffer_index = chip->rmh.stat[0]; goto done; } if (err == EB_RBUFFERS_TABLE_OVERFLOW) snd_printk(LXP "lx_buffer_give EB_RBUFFERS_TABLE_OVERFLOW\n"); if (err == EB_INVALID_STREAM) snd_printk(LXP "lx_buffer_give EB_INVALID_STREAM\n"); if (err == EB_CMD_REFUSED) snd_printk(LXP "lx_buffer_give EB_CMD_REFUSED\n"); done: spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int lx_buffer_free(struct lx6464es *chip, u32 pipe, int is_capture, u32 *r_buffer_size) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); 
lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER); chip->rmh.cmd[0] |= pipe_cmd; chip->rmh.cmd[0] |= MASK_BUFFER_ID; /* ask for the current buffer: the * microblaze will seek for it */ err = lx_message_send_atomic(chip, &chip->rmh); if (err == 0) *r_buffer_size = chip->rmh.stat[0] & MASK_DATA_SIZE; spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } int lx_buffer_cancel(struct lx6464es *chip, u32 pipe, int is_capture, u32 buffer_index) { int err; unsigned long flags; u32 pipe_cmd = PIPE_INFO_TO_CMD(is_capture, pipe); spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_11_CANCEL_BUFFER); chip->rmh.cmd[0] |= pipe_cmd; chip->rmh.cmd[0] |= buffer_index; err = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } /* low-level gain/peak handling * * \todo: can we unmute capture/playback channels independently? * * */ int lx_level_unmute(struct lx6464es *chip, int is_capture, int unmute) { int err; unsigned long flags; /* bit set to 1: channel muted */ u64 mute_mask = unmute ? 
0 : 0xFFFFFFFFFFFFFFFFLLU; spin_lock_irqsave(&chip->msg_lock, flags); lx_message_init(&chip->rmh, CMD_0D_SET_MUTE); chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, 0); chip->rmh.cmd[1] = (u32)(mute_mask >> (u64)32); /* hi part */ chip->rmh.cmd[2] = (u32)(mute_mask & (u64)0xFFFFFFFF); /* lo part */ snd_printk("mute %x %x %x\n", chip->rmh.cmd[0], chip->rmh.cmd[1], chip->rmh.cmd[2]); err = lx_message_send_atomic(chip, &chip->rmh); spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } static u32 peak_map[] = { 0x00000109, /* -90.308dB */ 0x0000083B, /* -72.247dB */ 0x000020C4, /* -60.205dB */ 0x00008273, /* -48.030dB */ 0x00020756, /* -36.005dB */ 0x00040C37, /* -30.001dB */ 0x00081385, /* -24.002dB */ 0x00101D3F, /* -18.000dB */ 0x0016C310, /* -15.000dB */ 0x002026F2, /* -12.001dB */ 0x002D6A86, /* -9.000dB */ 0x004026E6, /* -6.004dB */ 0x005A9DF6, /* -3.000dB */ 0x0065AC8B, /* -2.000dB */ 0x00721481, /* -1.000dB */ 0x007FFFFF, /* FS */ }; int lx_level_peaks(struct lx6464es *chip, int is_capture, int channels, u32 *r_levels) { int err = 0; unsigned long flags; int i; spin_lock_irqsave(&chip->msg_lock, flags); for (i = 0; i < channels; i += 4) { u32 s0, s1, s2, s3; lx_message_init(&chip->rmh, CMD_12_GET_PEAK); chip->rmh.cmd[0] |= PIPE_INFO_TO_CMD(is_capture, i); err = lx_message_send_atomic(chip, &chip->rmh); if (err == 0) { s0 = peak_map[chip->rmh.stat[0] & 0x0F]; s1 = peak_map[(chip->rmh.stat[0] >> 4) & 0xf]; s2 = peak_map[(chip->rmh.stat[0] >> 8) & 0xf]; s3 = peak_map[(chip->rmh.stat[0] >> 12) & 0xf]; } else s0 = s1 = s2 = s3 = 0; r_levels[0] = s0; r_levels[1] = s1; r_levels[2] = s2; r_levels[3] = s3; r_levels += 4; } spin_unlock_irqrestore(&chip->msg_lock, flags); return err; } /* interrupt handling */ #define PCX_IRQ_NONE 0 #define IRQCS_ACTIVE_PCIDB 0x00002000L /* Bit nø 13 */ #define IRQCS_ENABLE_PCIIRQ 0x00000100L /* Bit nø 08 */ #define IRQCS_ENABLE_PCIDB 0x00000200L /* Bit nø 09 */ static u32 lx_interrupt_test_ack(struct lx6464es *chip) { u32 irqcs = 
lx_plx_reg_read(chip, ePLX_IRQCS); /* Test if PCI Doorbell interrupt is active */ if (irqcs & IRQCS_ACTIVE_PCIDB) { u32 temp; irqcs = PCX_IRQ_NONE; while ((temp = lx_plx_reg_read(chip, ePLX_L2PCIDB))) { /* RAZ interrupt */ irqcs |= temp; lx_plx_reg_write(chip, ePLX_L2PCIDB, temp); } return irqcs; } return PCX_IRQ_NONE; } static int lx_interrupt_ack(struct lx6464es *chip, u32 *r_irqsrc, int *r_async_pending, int *r_async_escmd) { u32 irq_async; u32 irqsrc = lx_interrupt_test_ack(chip); if (irqsrc == PCX_IRQ_NONE) return 0; *r_irqsrc = irqsrc; irq_async = irqsrc & MASK_SYS_ASYNC_EVENTS; /* + EtherSound response * (set by xilinx) + EOB */ if (irq_async & MASK_SYS_STATUS_ESA) { irq_async &= ~MASK_SYS_STATUS_ESA; *r_async_escmd = 1; } if (irq_async) { /* snd_printd("interrupt: async event pending\n"); */ *r_async_pending = 1; } return 1; } static int lx_interrupt_handle_async_events(struct lx6464es *chip, u32 irqsrc, int *r_freq_changed, u64 *r_notified_in_pipe_mask, u64 *r_notified_out_pipe_mask) { int err; u32 stat[9]; /* answer from CMD_04_GET_EVENT */ /* On peut optimiser pour ne pas lire les evenements vides * les mots de réponse sont dans l'ordre suivant : * Stat[0] mot de status général * Stat[1] fin de buffer OUT pF * Stat[2] fin de buffer OUT pf * Stat[3] fin de buffer IN pF * Stat[4] fin de buffer IN pf * Stat[5] underrun poid fort * Stat[6] underrun poid faible * Stat[7] overrun poid fort * Stat[8] overrun poid faible * */ u64 orun_mask; u64 urun_mask; #if 0 int has_underrun = (irqsrc & MASK_SYS_STATUS_URUN) ? 1 : 0; int has_overrun = (irqsrc & MASK_SYS_STATUS_ORUN) ? 1 : 0; #endif int eb_pending_out = (irqsrc & MASK_SYS_STATUS_EOBO) ? 1 : 0; int eb_pending_in = (irqsrc & MASK_SYS_STATUS_EOBI) ? 1 : 0; *r_freq_changed = (irqsrc & MASK_SYS_STATUS_FREQ) ? 
1 : 0; err = lx_dsp_read_async_events(chip, stat); if (err < 0) return err; if (eb_pending_in) { *r_notified_in_pipe_mask = ((u64)stat[3] << 32) + stat[4]; snd_printdd(LXP "interrupt: EOBI pending %llx\n", *r_notified_in_pipe_mask); } if (eb_pending_out) { *r_notified_out_pipe_mask = ((u64)stat[1] << 32) + stat[2]; snd_printdd(LXP "interrupt: EOBO pending %llx\n", *r_notified_out_pipe_mask); } orun_mask = ((u64)stat[7] << 32) + stat[8]; urun_mask = ((u64)stat[5] << 32) + stat[6]; /* todo: handle xrun notification */ return err; } static int lx_interrupt_request_new_buffer(struct lx6464es *chip, struct lx_stream *lx_stream) { struct snd_pcm_substream *substream = lx_stream->stream; const unsigned int is_capture = lx_stream->is_capture; int err; unsigned long flags; const u32 channels = substream->runtime->channels; const u32 bytes_per_frame = channels * 3; const u32 period_size = substream->runtime->period_size; const u32 period_bytes = period_size * bytes_per_frame; const u32 pos = lx_stream->frame_pos; const u32 next_pos = ((pos+1) == substream->runtime->periods) ? 
0 : pos + 1; dma_addr_t buf = substream->dma_buffer.addr + pos * period_bytes; u32 buf_hi = 0; u32 buf_lo = 0; u32 buffer_index = 0; u32 needed, freed; u32 size_array[MAX_STREAM_BUFFER]; snd_printdd("->lx_interrupt_request_new_buffer\n"); spin_lock_irqsave(&chip->lock, flags); err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array); snd_printdd(LXP "interrupt: needed %d, freed %d\n", needed, freed); unpack_pointer(buf, &buf_lo, &buf_hi); err = lx_buffer_give(chip, 0, is_capture, period_bytes, buf_lo, buf_hi, &buffer_index); snd_printdd(LXP "interrupt: gave buffer index %x on %p (%d bytes)\n", buffer_index, (void *)buf, period_bytes); lx_stream->frame_pos = next_pos; spin_unlock_irqrestore(&chip->lock, flags); return err; } void lx_tasklet_playback(unsigned long data) { struct lx6464es *chip = (struct lx6464es *)data; struct lx_stream *lx_stream = &chip->playback_stream; int err; snd_printdd("->lx_tasklet_playback\n"); err = lx_interrupt_request_new_buffer(chip, lx_stream); if (err < 0) snd_printk(KERN_ERR LXP "cannot request new buffer for playback\n"); snd_pcm_period_elapsed(lx_stream->stream); } void lx_tasklet_capture(unsigned long data) { struct lx6464es *chip = (struct lx6464es *)data; struct lx_stream *lx_stream = &chip->capture_stream; int err; snd_printdd("->lx_tasklet_capture\n"); err = lx_interrupt_request_new_buffer(chip, lx_stream); if (err < 0) snd_printk(KERN_ERR LXP "cannot request new buffer for capture\n"); snd_pcm_period_elapsed(lx_stream->stream); } static int lx_interrupt_handle_audio_transfer(struct lx6464es *chip, u64 notified_in_pipe_mask, u64 notified_out_pipe_mask) { int err = 0; if (notified_in_pipe_mask) { snd_printdd(LXP "requesting audio transfer for capture\n"); tasklet_hi_schedule(&chip->tasklet_capture); } if (notified_out_pipe_mask) { snd_printdd(LXP "requesting audio transfer for playback\n"); tasklet_hi_schedule(&chip->tasklet_playback); } return err; } irqreturn_t lx_interrupt(int irq, void *dev_id) { struct 
lx6464es *chip = dev_id; int async_pending, async_escmd; u32 irqsrc; spin_lock(&chip->lock); snd_printdd("**************************************************\n"); if (!lx_interrupt_ack(chip, &irqsrc, &async_pending, &async_escmd)) { spin_unlock(&chip->lock); snd_printdd("IRQ_NONE\n"); return IRQ_NONE; /* this device did not cause the interrupt */ } if (irqsrc & MASK_SYS_STATUS_CMD_DONE) goto exit; #if 0 if (irqsrc & MASK_SYS_STATUS_EOBI) snd_printdd(LXP "interrupt: EOBI\n"); if (irqsrc & MASK_SYS_STATUS_EOBO) snd_printdd(LXP "interrupt: EOBO\n"); if (irqsrc & MASK_SYS_STATUS_URUN) snd_printdd(LXP "interrupt: URUN\n"); if (irqsrc & MASK_SYS_STATUS_ORUN) snd_printdd(LXP "interrupt: ORUN\n"); #endif if (async_pending) { u64 notified_in_pipe_mask = 0; u64 notified_out_pipe_mask = 0; int freq_changed; int err; /* handle async events */ err = lx_interrupt_handle_async_events(chip, irqsrc, &freq_changed, &notified_in_pipe_mask, &notified_out_pipe_mask); if (err) snd_printk(KERN_ERR LXP "error handling async events\n"); err = lx_interrupt_handle_audio_transfer(chip, notified_in_pipe_mask, notified_out_pipe_mask ); if (err) snd_printk(KERN_ERR LXP "error during audio transfer\n"); } if (async_escmd) { #if 0 /* backdoor for ethersound commands * * for now, we do not need this * * */ snd_printdd("lx6464es: interrupt requests escmd handling\n"); #endif } exit: spin_unlock(&chip->lock); return IRQ_HANDLED; /* this device caused the interrupt */ } static void lx_irq_set(struct lx6464es *chip, int enable) { u32 reg = lx_plx_reg_read(chip, ePLX_IRQCS); /* enable/disable interrupts * * Set the Doorbell and PCI interrupt enable bits * * */ if (enable) reg |= (IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB); else reg &= ~(IRQCS_ENABLE_PCIIRQ | IRQCS_ENABLE_PCIDB); lx_plx_reg_write(chip, ePLX_IRQCS, reg); } void lx_irq_enable(struct lx6464es *chip) { snd_printdd("->lx_irq_enable\n"); lx_irq_set(chip, 1); } void lx_irq_disable(struct lx6464es *chip) { snd_printdd("->lx_irq_disable\n"); 
lx_irq_set(chip, 0); }
gpl-2.0
EvolutionzzXDA/ubertale_sprout
drivers/media/usb/dvb-usb/vp702x-fe.c
8803
8968
/* DVB frontend part of the Linux driver for the TwinhanDTV StarBox USB2.0 * DVB-S receiver. * * Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de> * Metzler Brothers Systementwicklung GbR * * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de> * * Thanks to Twinhan who kindly provided hardware and information. * * This file can be removed soon, after the DST-driver is rewritten to provice * the frontend-controlling separately. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation, version 2. * * see Documentation/dvb/README.dvb-usb for more information * */ #include "vp702x.h" struct vp702x_fe_state { struct dvb_frontend fe; struct dvb_usb_device *d; struct dvb_frontend_ops ops; fe_sec_voltage_t voltage; fe_sec_tone_mode_t tone_mode; u8 lnb_buf[8]; u8 lock; u8 sig; u8 snr; unsigned long next_status_check; unsigned long status_check_interval; }; static int vp702x_fe_refresh_state(struct vp702x_fe_state *st) { struct vp702x_device_state *dst = st->d->priv; u8 *buf; if (time_after(jiffies, st->next_status_check)) { mutex_lock(&dst->buf_mutex); buf = dst->buf; vp702x_usb_in_op(st->d, READ_STATUS, 0, 0, buf, 10); st->lock = buf[4]; vp702x_usb_in_op(st->d, READ_TUNER_REG_REQ, 0x11, 0, buf, 1); st->snr = buf[0]; vp702x_usb_in_op(st->d, READ_TUNER_REG_REQ, 0x15, 0, buf, 1); st->sig = buf[0]; mutex_unlock(&dst->buf_mutex); st->next_status_check = jiffies + (st->status_check_interval*HZ)/1000; } return 0; } static u8 vp702x_chksum(u8 *buf,int f, int count) { u8 s = 0; int i; for (i = f; i < f+count; i++) s += buf[i]; return ~s+1; } static int vp702x_fe_read_status(struct dvb_frontend* fe, fe_status_t *status) { struct vp702x_fe_state *st = fe->demodulator_priv; vp702x_fe_refresh_state(st); deb_fe("%s\n",__func__); if (st->lock == 0) *status = FE_HAS_LOCK | FE_HAS_SYNC | FE_HAS_VITERBI | FE_HAS_SIGNAL | FE_HAS_CARRIER; else *status = 0; if 
(*status & FE_HAS_LOCK) st->status_check_interval = 1000; else st->status_check_interval = 250; return 0; } /* not supported by this Frontend */ static int vp702x_fe_read_ber(struct dvb_frontend* fe, u32 *ber) { struct vp702x_fe_state *st = fe->demodulator_priv; vp702x_fe_refresh_state(st); *ber = 0; return 0; } /* not supported by this Frontend */ static int vp702x_fe_read_unc_blocks(struct dvb_frontend* fe, u32 *unc) { struct vp702x_fe_state *st = fe->demodulator_priv; vp702x_fe_refresh_state(st); *unc = 0; return 0; } static int vp702x_fe_read_signal_strength(struct dvb_frontend* fe, u16 *strength) { struct vp702x_fe_state *st = fe->demodulator_priv; vp702x_fe_refresh_state(st); *strength = (st->sig << 8) | st->sig; return 0; } static int vp702x_fe_read_snr(struct dvb_frontend* fe, u16 *snr) { u8 _snr; struct vp702x_fe_state *st = fe->demodulator_priv; vp702x_fe_refresh_state(st); _snr = (st->snr & 0x1f) * 0xff / 0x1f; *snr = (_snr << 8) | _snr; return 0; } static int vp702x_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune) { deb_fe("%s\n",__func__); tune->min_delay_ms = 2000; return 0; } static int vp702x_fe_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct vp702x_fe_state *st = fe->demodulator_priv; struct vp702x_device_state *dst = st->d->priv; u32 freq = fep->frequency/1000; /*CalFrequency*/ /* u16 frequencyRef[16] = { 2, 4, 8, 16, 32, 64, 128, 256, 24, 5, 10, 20, 40, 80, 160, 320 }; */ u64 sr; u8 *cmd; mutex_lock(&dst->buf_mutex); cmd = dst->buf; memset(cmd, 0, 10); cmd[0] = (freq >> 8) & 0x7f; cmd[1] = freq & 0xff; cmd[2] = 1; /* divrate == 4 -> frequencyRef[1] -> 1 here */ sr = (u64) (fep->symbol_rate/1000) << 20; do_div(sr,88000); cmd[3] = (sr >> 12) & 0xff; cmd[4] = (sr >> 4) & 0xff; cmd[5] = (sr << 4) & 0xf0; deb_fe("setting frontend to: %u -> %u (%x) LNB-based GHz, symbolrate: %d -> %lu (%lx)\n", fep->frequency, freq, freq, fep->symbol_rate, (unsigned 
long) sr, (unsigned long) sr); /* if (fep->inversion == INVERSION_ON) cmd[6] |= 0x80; */ if (st->voltage == SEC_VOLTAGE_18) cmd[6] |= 0x40; /* if (fep->symbol_rate > 8000000) cmd[6] |= 0x20; if (fep->frequency < 1531000) cmd[6] |= 0x04; if (st->tone_mode == SEC_TONE_ON) cmd[6] |= 0x01;*/ cmd[7] = vp702x_chksum(cmd,0,7); st->status_check_interval = 250; st->next_status_check = jiffies; vp702x_usb_inout_op(st->d, cmd, 8, cmd, 10, 100); if (cmd[2] == 0 && cmd[3] == 0) deb_fe("tuning failed.\n"); else deb_fe("tuning succeeded.\n"); mutex_unlock(&dst->buf_mutex); return 0; } static int vp702x_fe_init(struct dvb_frontend *fe) { struct vp702x_fe_state *st = fe->demodulator_priv; deb_fe("%s\n",__func__); vp702x_usb_in_op(st->d, RESET_TUNER, 0, 0, NULL, 0); return 0; } static int vp702x_fe_sleep(struct dvb_frontend *fe) { deb_fe("%s\n",__func__); return 0; } static int vp702x_fe_send_diseqc_msg (struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *m) { u8 *cmd; struct vp702x_fe_state *st = fe->demodulator_priv; struct vp702x_device_state *dst = st->d->priv; deb_fe("%s\n",__func__); if (m->msg_len > 4) return -EINVAL; mutex_lock(&dst->buf_mutex); cmd = dst->buf; cmd[1] = SET_DISEQC_CMD; cmd[2] = m->msg_len; memcpy(&cmd[3], m->msg, m->msg_len); cmd[7] = vp702x_chksum(cmd, 0, 7); vp702x_usb_inout_op(st->d, cmd, 8, cmd, 10, 100); if (cmd[2] == 0 && cmd[3] == 0) deb_fe("diseqc cmd failed.\n"); else deb_fe("diseqc cmd succeeded.\n"); mutex_unlock(&dst->buf_mutex); return 0; } static int vp702x_fe_send_diseqc_burst (struct dvb_frontend* fe, fe_sec_mini_cmd_t burst) { deb_fe("%s\n",__func__); return 0; } static int vp702x_fe_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone) { struct vp702x_fe_state *st = fe->demodulator_priv; struct vp702x_device_state *dst = st->d->priv; u8 *buf; deb_fe("%s\n",__func__); st->tone_mode = tone; if (tone == SEC_TONE_ON) st->lnb_buf[2] = 0x02; else st->lnb_buf[2] = 0x00; st->lnb_buf[7] = vp702x_chksum(st->lnb_buf, 0, 7); 
mutex_lock(&dst->buf_mutex); buf = dst->buf; memcpy(buf, st->lnb_buf, 8); vp702x_usb_inout_op(st->d, buf, 8, buf, 10, 100); if (buf[2] == 0 && buf[3] == 0) deb_fe("set_tone cmd failed.\n"); else deb_fe("set_tone cmd succeeded.\n"); mutex_unlock(&dst->buf_mutex); return 0; } static int vp702x_fe_set_voltage (struct dvb_frontend* fe, fe_sec_voltage_t voltage) { struct vp702x_fe_state *st = fe->demodulator_priv; struct vp702x_device_state *dst = st->d->priv; u8 *buf; deb_fe("%s\n",__func__); st->voltage = voltage; if (voltage != SEC_VOLTAGE_OFF) st->lnb_buf[4] = 0x01; else st->lnb_buf[4] = 0x00; st->lnb_buf[7] = vp702x_chksum(st->lnb_buf, 0, 7); mutex_lock(&dst->buf_mutex); buf = dst->buf; memcpy(buf, st->lnb_buf, 8); vp702x_usb_inout_op(st->d, buf, 8, buf, 10, 100); if (buf[2] == 0 && buf[3] == 0) deb_fe("set_voltage cmd failed.\n"); else deb_fe("set_voltage cmd succeeded.\n"); mutex_unlock(&dst->buf_mutex); return 0; } static void vp702x_fe_release(struct dvb_frontend* fe) { struct vp702x_fe_state *st = fe->demodulator_priv; kfree(st); } static struct dvb_frontend_ops vp702x_fe_ops; struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d) { struct vp702x_fe_state *s = kzalloc(sizeof(struct vp702x_fe_state), GFP_KERNEL); if (s == NULL) goto error; s->d = d; memcpy(&s->fe.ops,&vp702x_fe_ops,sizeof(struct dvb_frontend_ops)); s->fe.demodulator_priv = s; s->lnb_buf[1] = SET_LNB_POWER; s->lnb_buf[3] = 0xff; /* 0=tone burst, 2=data burst, ff=off */ return &s->fe; error: return NULL; } static struct dvb_frontend_ops vp702x_fe_ops = { .delsys = { SYS_DVBS }, .info = { .name = "Twinhan DST-like frontend (VP7021/VP7020) DVB-S", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 1000, /* kHz for QPSK frontends */ .frequency_tolerance = 0, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .symbol_rate_tolerance = 500, /* ppm */ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_QPSK | 
FE_CAN_FEC_AUTO }, .release = vp702x_fe_release, .init = vp702x_fe_init, .sleep = vp702x_fe_sleep, .set_frontend = vp702x_fe_set_frontend, .get_tune_settings = vp702x_fe_get_tune_settings, .read_status = vp702x_fe_read_status, .read_ber = vp702x_fe_read_ber, .read_signal_strength = vp702x_fe_read_signal_strength, .read_snr = vp702x_fe_read_snr, .read_ucblocks = vp702x_fe_read_unc_blocks, .diseqc_send_master_cmd = vp702x_fe_send_diseqc_msg, .diseqc_send_burst = vp702x_fe_send_diseqc_burst, .set_tone = vp702x_fe_set_tone, .set_voltage = vp702x_fe_set_voltage, };
gpl-2.0
alephzain/archos-gpl-gen8-kernel
drivers/video/console/font_acorn_8x8.c
14691
16047
/* Acorn-like font definition, with PC graphics characters */ #include <linux/font.h> static const unsigned char acorndata_8x8[] = { /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ /* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */ /* 03 */ 0x6c, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10, 0x00, /* ^C */ /* 04 */ 0x10, 0x38, 0x7c, 0xfe, 0x7c, 0x38, 0x10, 0x00, /* ^D */ /* 05 */ 0x00, 0x18, 0x3c, 0xe7, 0xe7, 0x3c, 0x18, 0x00, /* ^E */ /* 06 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 07 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 08 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 09 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0A */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0B */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0C */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0D */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0E */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 0F */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 10 */ 0x00, 0x60, 0x78, 0x7e, 0x7e, 0x78, 0x60, 0x00, /* |> */ /* 11 */ 0x00, 0x06, 0x1e, 0x7e, 0x7e, 0x1e, 0x06, 0x00, /* <| */ /* 12 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 13 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 14 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 15 */ 0x3c, 0x60, 0x3c, 0x66, 0x3c, 0x06, 0x3c, 0x00, /* 16 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 17 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 18 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 19 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1A */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1B */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1C */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1D */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 1E */ 0x00, 0x18, 0x18, 0x3c, 0x3c, 0x7e, 0x7e, 0x00, /* /\ */ /* 1F */ 0x00, 
0x7e, 0x7e, 0x3c, 0x3c, 0x18, 0x18, 0x00, /* \/ */ /* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* */ /* 21 */ 0x18, 0x3c, 0x3c, 0x18, 0x18, 0x00, 0x18, 0x00, /* ! */ /* 22 */ 0x6C, 0x6C, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00, /* " */ /* 23 */ 0x36, 0x36, 0x7F, 0x36, 0x7F, 0x36, 0x36, 0x00, /* # */ /* 24 */ 0x0C, 0x3F, 0x68, 0x3E, 0x0B, 0x7E, 0x18, 0x00, /* $ */ /* 25 */ 0x60, 0x66, 0x0C, 0x18, 0x30, 0x66, 0x06, 0x00, /* % */ /* 26 */ 0x38, 0x6C, 0x6C, 0x38, 0x6D, 0x66, 0x3B, 0x00, /* & */ /* 27 */ 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, /* ' */ /* 28 */ 0x0C, 0x18, 0x30, 0x30, 0x30, 0x18, 0x0C, 0x00, /* ( */ /* 29 */ 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x18, 0x30, 0x00, /* ) */ /* 2A */ 0x00, 0x18, 0x7E, 0x3C, 0x7E, 0x18, 0x00, 0x00, /* * */ /* 2B */ 0x00, 0x18, 0x18, 0x7E, 0x18, 0x18, 0x00, 0x00, /* + */ /* 2C */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, /* , */ /* 2D */ 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, /* - */ /* 2E */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, /* . 
*/ /* 2F */ 0x00, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x00, 0x00, /* / */ /* 30 */ 0x3C, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x3C, 0x00, /* 0 */ /* 31 */ 0x18, 0x38, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, /* 1 */ /* 32 */ 0x3C, 0x66, 0x06, 0x0C, 0x18, 0x30, 0x7E, 0x00, /* 2 */ /* 33 */ 0x3C, 0x66, 0x06, 0x1C, 0x06, 0x66, 0x3C, 0x00, /* 3 */ /* 34 */ 0x0C, 0x1C, 0x3C, 0x6C, 0x7E, 0x0C, 0x0C, 0x00, /* 4 */ /* 35 */ 0x7E, 0x60, 0x7C, 0x06, 0x06, 0x66, 0x3C, 0x00, /* 5 */ /* 36 */ 0x1C, 0x30, 0x60, 0x7C, 0x66, 0x66, 0x3C, 0x00, /* 6 */ /* 37 */ 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x00, /* 7 */ /* 38 */ 0x3C, 0x66, 0x66, 0x3C, 0x66, 0x66, 0x3C, 0x00, /* 8 */ /* 39 */ 0x3C, 0x66, 0x66, 0x3E, 0x06, 0x0C, 0x38, 0x00, /* 9 */ /* 3A */ 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, /* : */ /* 3B */ 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x30, /* ; */ /* 3C */ 0x0C, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0C, 0x00, /* < */ /* 3D */ 0x00, 0x00, 0x7E, 0x00, 0x7E, 0x00, 0x00, 0x00, /* = */ /* 3E */ 0x30, 0x18, 0x0C, 0x06, 0x0C, 0x18, 0x30, 0x00, /* > */ /* 3F */ 0x3C, 0x66, 0x0C, 0x18, 0x18, 0x00, 0x18, 0x00, /* ? 
*/ /* 40 */ 0x3C, 0x66, 0x6E, 0x6A, 0x6E, 0x60, 0x3C, 0x00, /* @ */ /* 41 */ 0x3C, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, /* A */ /* 42 */ 0x7C, 0x66, 0x66, 0x7C, 0x66, 0x66, 0x7C, 0x00, /* B */ /* 43 */ 0x3C, 0x66, 0x60, 0x60, 0x60, 0x66, 0x3C, 0x00, /* C */ /* 44 */ 0x78, 0x6C, 0x66, 0x66, 0x66, 0x6C, 0x78, 0x00, /* D */ /* 45 */ 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x7E, 0x00, /* E */ /* 46 */ 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x60, 0x00, /* F */ /* 47 */ 0x3C, 0x66, 0x60, 0x6E, 0x66, 0x66, 0x3C, 0x00, /* G */ /* 48 */ 0x66, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, /* H */ /* 49 */ 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, /* I */ /* 4A */ 0x3E, 0x0C, 0x0C, 0x0C, 0x0C, 0x6C, 0x38, 0x00, /* J */ /* 4B */ 0x66, 0x6C, 0x78, 0x70, 0x78, 0x6C, 0x66, 0x00, /* K */ /* 4C */ 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7E, 0x00, /* L */ /* 4D */ 0x63, 0x77, 0x7F, 0x6B, 0x6B, 0x63, 0x63, 0x00, /* M */ /* 4E */ 0x66, 0x66, 0x76, 0x7E, 0x6E, 0x66, 0x66, 0x00, /* N */ /* 4F */ 0x3C, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, /* O */ /* 50 */ 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, 0x60, 0x00, /* P */ /* 51 */ 0x3C, 0x66, 0x66, 0x66, 0x6A, 0x6C, 0x36, 0x00, /* Q */ /* 52 */ 0x7C, 0x66, 0x66, 0x7C, 0x6C, 0x66, 0x66, 0x00, /* R */ /* 53 */ 0x3C, 0x66, 0x60, 0x3C, 0x06, 0x66, 0x3C, 0x00, /* S */ /* 54 */ 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* T */ /* 55 */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, /* U */ /* 56 */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, /* V */ /* 57 */ 0x63, 0x63, 0x6B, 0x6B, 0x7F, 0x77, 0x63, 0x00, /* W */ /* 58 */ 0x66, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x66, 0x00, /* X */ /* 59 */ 0x66, 0x66, 0x66, 0x3C, 0x18, 0x18, 0x18, 0x00, /* Y */ /* 5A */ 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x7E, 0x00, /* Z */ /* 5B */ 0x7C, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7C, 0x00, /* [ */ /* 5C */ 0x00, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x00, 0x00, /* \ */ /* 5D */ 0x3E, 0x06, 0x06, 0x06, 0x06, 0x06, 0x3E, 0x00, /* ] */ /* 5E */ 0x3C, 0x66, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, /* ^ */ /* 5F */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, /* _ */ /* 60 */ 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ` */ /* 61 */ 0x00, 0x00, 0x3C, 0x06, 0x3E, 0x66, 0x3E, 0x00, /* a */ /* 62 */ 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x7C, 0x00, /* b */ /* 63 */ 0x00, 0x00, 0x3C, 0x66, 0x60, 0x66, 0x3C, 0x00, /* c */ /* 64 */ 0x06, 0x06, 0x3E, 0x66, 0x66, 0x66, 0x3E, 0x00, /* d */ /* 65 */ 0x00, 0x00, 0x3C, 0x66, 0x7E, 0x60, 0x3C, 0x00, /* e */ /* 66 */ 0x1C, 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x00, /* f */ /* 67 */ 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x3C, /* g */ /* 68 */ 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, /* h */ /* 69 */ 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x3C, 0x00, /* i */ /* 6A */ 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x70, /* j */ /* 6B */ 0x60, 0x60, 0x66, 0x6C, 0x78, 0x6C, 0x66, 0x00, /* k */ /* 6C */ 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3C, 0x00, /* l */ /* 6D */ 0x00, 0x00, 0x36, 0x7F, 0x6B, 0x6B, 0x63, 0x00, /* m */ /* 6E */ 0x00, 0x00, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, /* n */ /* 6F */ 0x00, 0x00, 0x3C, 0x66, 0x66, 0x66, 0x3C, 0x00, /* o */ /* 70 */ 0x00, 0x00, 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, /* p */ /* 71 */ 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x07, /* q */ /* 72 */ 0x00, 0x00, 0x6C, 0x76, 0x60, 0x60, 0x60, 0x00, /* r */ /* 73 */ 0x00, 0x00, 0x3E, 0x60, 0x3C, 0x06, 0x7C, 0x00, /* s */ /* 74 */ 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x1C, 0x00, /* t */ /* 75 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3E, 0x00, /* u */ /* 76 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, /* v */ /* 77 */ 0x00, 0x00, 0x63, 0x6B, 0x6B, 0x7F, 0x36, 0x00, /* w */ /* 78 */ 0x00, 0x00, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x00, /* x */ /* 79 */ 0x00, 0x00, 0x66, 0x66, 0x66, 0x3E, 0x06, 0x3C, /* y */ /* 7A */ 0x00, 0x00, 0x7E, 0x0C, 0x18, 0x30, 0x7E, 0x00, /* z */ /* 7B */ 0x0C, 0x18, 0x18, 0x70, 0x18, 0x18, 0x0C, 0x00, /* { */ /* 7C */ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* | */ /* 7D */ 0x30, 0x18, 0x18, 
0x0E, 0x18, 0x18, 0x30, 0x00, /* } */ /* 7E */ 0x31, 0x6B, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, /* ~ */ /* 7F */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /*  */ /* 80 */ 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x30, 0x60, /* 81 */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3e, 0x00, /* 82 */ 0x0c, 0x18, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00, /* 83 */ 0x18, 0x66, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* 84 */ 0x66, 0x00, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* 85 */ 0x30, 0x18, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* 86 */ 0x3c, 0x66, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* 87 */ 0x00, 0x00, 0x3c, 0x66, 0x60, 0x66, 0x3c, 0x60, /* 88 */ 0x3c, 0x66, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00, /* 89 */ 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00, /* 8A */ 0x30, 0x18, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00, /* 8B */ 0x66, 0x00, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00, /* 8C */ 0x3c, 0x66, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00, /* 8D */ 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00, /* 8E */ 0x66, 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x66, 0x00, /* 8F */ 0x18, 0x66, 0x00, 0x3c, 0x66, 0x7e, 0x66, 0x00, /* 90 */ 0x0c, 0x18, 0x7e, 0x60, 0x7c, 0x60, 0x7e, 0x00, /* 91 */ 0x00, 0x00, 0x3f, 0x0d, 0x3f, 0x6c, 0x3f, 0x00, /* 92 */ 0x3f, 0x66, 0x66, 0x7f, 0x66, 0x66, 0x67, 0x00, /* 93 */ 0x3c, 0x66, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00, /* 94 */ 0x66, 0x00, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00, /* 95 */ 0x30, 0x18, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00, /* 96 */ 0x3c, 0x66, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00, /* 97 */ 0x30, 0x18, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00, /* 98 */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x06, 0x3c, /* 99 */ 0x66, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x3c, 0x00, /* 9A */ 0x66, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00, /* 9B */ 0x08, 0x3e, 0x6b, 0x68, 0x6b, 0x3e, 0x08, 0x00, /* 9C */ 0x1c, 0x36, 0x30, 0x7c, 0x30, 0x30, 0x7e, 0x00, /* 9D */ 0x66, 0x3c, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, /* 9E */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* 9F */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* A0 */ 
0x0c, 0x18, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00, /* A1 */ 0x0c, 0x18, 0x00, 0x38, 0x18, 0x18, 0x3c, 0x00, /* A2 */ 0x0c, 0x18, 0x00, 0x3c, 0x66, 0x66, 0x3c, 0x00, /* A3 */ 0x0c, 0x18, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x00, /* A4 */ 0x36, 0x6c, 0x00, 0x7c, 0x66, 0x66, 0x66, 0x00, /* A5 */ 0x36, 0x6c, 0x00, 0x66, 0x76, 0x6e, 0x66, 0x00, /* A6 */ 0x1c, 0x06, 0x1e, 0x36, 0x1e, 0x00, 0x3e, 0x00, /* A7 */ 0x1c, 0x36, 0x36, 0x36, 0x1c, 0x00, 0x3e, 0x00, /* A8 */ 0x18, 0x00, 0x18, 0x18, 0x30, 0x66, 0x3c, 0x00, /* A9 */ 0x7e, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* AA */ 0x7e, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* AB */ 0x40, 0xc0, 0x40, 0x4f, 0x41, 0x0f, 0x08, 0x0f, /* AC */ 0x40, 0xc0, 0x40, 0x48, 0x48, 0x0a, 0x0f, 0x02, /* AD */ 0x18, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, /* AE */ 0x00, 0x33, 0x66, 0xcc, 0xcc, 0x66, 0x33, 0x00, /* AF */ 0x00, 0xcc, 0x66, 0x33, 0x33, 0x66, 0xcc, 0x00, /* B0 */ 0x22, 0x88, 0x22, 0x88, 0x22, 0x88, 0x22, 0x88, /* B1 */ 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, /* B2 */ 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, /* B3 */ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, /* B4 */ 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, /* B5 */ 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18, /* B6 */ 0x66, 0x66, 0x66, 0xe6, 0x66, 0x66, 0x66, 0x66, /* B7 */ 0x00, 0x00, 0x00, 0xfe, 0x66, 0x66, 0x66, 0x66, /* B8 */ 0x00, 0x00, 0xf8, 0x18, 0xf8, 0x18, 0x18, 0x18, /* B9 */ 0x66, 0x66, 0xe6, 0x06, 0xe6, 0x66, 0x66, 0x66, /* BA */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* BB */ 0x00, 0x00, 0xfe, 0x06, 0xe6, 0x66, 0x66, 0x66, /* BC */ 0x66, 0x66, 0xe6, 0x06, 0xfe, 0x00, 0x00, 0x00, /* BD */ 0x66, 0x66, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, /* BE */ 0x18, 0x18, 0xf8, 0x18, 0xf8, 0x00, 0x00, 0x00, /* BF */ 0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, /* C0 */ 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, /* C1 */ 0x18, 0x18, 0x18, 0xff, 0x00, 0x00, 0x00, 0x00, /* C2 */ 0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, /* C3 */ 
0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, /* C4 */ 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, /* C5 */ 0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, /* C6 */ 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, /* C7 */ 0x66, 0x66, 0x66, 0x67, 0x66, 0x66, 0x66, 0x66, /* C8 */ 0x66, 0x66, 0x67, 0x60, 0x7f, 0x00, 0x00, 0x00, /* C9 */ 0x00, 0x00, 0x7f, 0x60, 0x67, 0x66, 0x66, 0x66, /* CA */ 0x66, 0x66, 0xe7, 0x00, 0xff, 0x00, 0x00, 0x00, /* CB */ 0x00, 0x00, 0xff, 0x00, 0xe7, 0x66, 0x66, 0x66, /* CC */ 0x66, 0x66, 0x67, 0x60, 0x67, 0x66, 0x66, 0x66, /* CD */ 0x00, 0x00, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, /* CE */ 0x66, 0x66, 0xe7, 0x00, 0xe7, 0x66, 0x66, 0x66, /* CF */ 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, /* D0 */ 0x66, 0x66, 0x66, 0xff, 0x00, 0x00, 0x00, 0x00, /* D1 */ 0x00, 0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, /* D2 */ 0x00, 0x00, 0x00, 0xff, 0x66, 0x66, 0x66, 0x66, /* D3 */ 0x66, 0x66, 0x66, 0x7f, 0x00, 0x00, 0x00, 0x00, /* D4 */ 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, /* D5 */ 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, /* D6 */ 0x00, 0x00, 0x00, 0x7f, 0x66, 0x66, 0x66, 0x66, /* D7 */ 0x66, 0x66, 0x66, 0xff, 0x66, 0x66, 0x66, 0x66, /* D8 */ 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, /* D9 */ 0x18, 0x18, 0x18, 0xf8, 0x00, 0x00, 0x00, 0x00, /* DA */ 0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, /* DB */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* DC */ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, /* DD */ 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, /* DE */ 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, /* DF */ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, /* E0 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E1 */ 0x3c, 0x66, 0x66, 0x6c, 0x66, 0x66, 0x6c, 0xc0, /* E2 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E3 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E4 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E5 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E6 */ 
0x00, 0x00, 0x33, 0x33, 0x33, 0x33, 0x3e, 0x60, /* E7 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E8 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* E9 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EA */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EB */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EC */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* ED */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* EF */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F0 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F1 */ 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x7e, 0x00, /* F2 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F3 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F4 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F5 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F6 */ 0x00, 0x18, 0x00, 0xff, 0x00, 0x18, 0x00, 0x00, /* F7 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* F8 */ 0x3c, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, /* F9 */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FA */ 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, /* FB */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FC */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00, /* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; const struct font_desc font_acorn_8x8 = { .idx = ACORN8x8_IDX, .name = "Acorn8x8", .width = 8, .height = 8, .data = acorndata_8x8, #ifdef CONFIG_ARCH_ACORN .pref = 20, #else .pref = 0, #endif };
gpl-2.0
misko/linux-sunxi
modules/wifi/nano-c047.12/WiFiEngine/ewpa/src/eloop_wifiengine.c
100
9860
/* * Event loop - empty template (basic structure, but no OS specific operations) * Copyright (c) 2002-2005, Jouni Malinen <j@w1.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Alternatively, this software may be distributed under the terms of BSD * license. * * See README and COPYING for more details. */ #include "includes.h" #include "common.h" #include "eloop.h" #define static struct eloop_sock { int sock; void *eloop_data; void *user_data; void (*handler)(int sock, void *eloop_ctx, void *sock_ctx); }; struct eloop_timeout { struct os_time time; void *eloop_data; void *user_data; void (*handler)(void *eloop_ctx, void *sock_ctx); struct eloop_timeout *next; }; struct eloop_signal { int sig; void *user_data; void (*handler)(int sig, void *eloop_ctx, void *signal_ctx); int signaled; }; struct eloop_data { void *user_data; int max_sock, reader_count; struct eloop_sock *readers; struct eloop_timeout *timeout; struct eloop_timeout *registered_timeout; driver_timer_id_t timer_id; int signal_count; struct eloop_signal *signals; int signaled; int pending_terminate; int terminate; int reader_table_changed; }; static struct eloop_data eloop; int eloop_init(void *user_data) { memset(&eloop, 0, sizeof(eloop)); eloop.user_data = user_data; DriverEnvironment_GetNewTimer(&eloop.timer_id, TRUE); return 0; } int eloop_register_read_sock(int sock, void (*handler)(int sock, void *eloop_ctx, void *sock_ctx), void *eloop_data, void *user_data) { struct eloop_sock *tmp; tmp = (struct eloop_sock *) os_realloc(eloop.readers, (eloop.reader_count + 1) * sizeof(struct eloop_sock)); if (tmp == NULL) return -1; tmp[eloop.reader_count].sock = sock; tmp[eloop.reader_count].eloop_data = eloop_data; tmp[eloop.reader_count].user_data = user_data; tmp[eloop.reader_count].handler = handler; eloop.reader_count++; eloop.readers = tmp; if (sock > 
eloop.max_sock) eloop.max_sock = sock; eloop.reader_table_changed = 1; return 0; } void eloop_unregister_read_sock(int sock) { int i; if (eloop.readers == NULL || eloop.reader_count == 0) return; for (i = 0; i < eloop.reader_count; i++) { if (eloop.readers[i].sock == sock) break; } if (i == eloop.reader_count) return; if (i != eloop.reader_count - 1) { memmove(&eloop.readers[i], &eloop.readers[i + 1], (eloop.reader_count - i - 1) * sizeof(struct eloop_sock)); } eloop.reader_count--; eloop.reader_table_changed = 1; } static void eloop_reschedule(void); /* Simulate os_get_time with the internal tick counter, this does not * provide a correct time stamp, but we really only care about a * monotonically increasing time here. * */ /* XXX this does not belong here */ static void os_get_time_mono(struct os_time *t) { #ifdef __rtke__ /* This will wrap after about 15 months. */ unsigned int ticks = DriverEnvironment_GetTicks(); unsigned int H, L; /* avoid overflow by performing calculation in multiple steps */ H = ticks / 100000; L = ticks % 100000; t->sec = H * 923; t->usec = L * 9230; t->sec += t->usec / 1000000; t->usec %= 1000000; #else /* Assuming driver_msec_t is a 32-bit type, this will wrap * after 49 days, or whenever _msec wraps. 
*/ driver_msec_t msec = DriverEnvironment_GetTimestamp_msec(); t->usec = msec % 1000; t->sec = msec - t->usec; t->sec /= 1000; t->usec *= 1000; #endif } static int eloop_timer(void *data, size_t len) { struct os_time now; os_get_time_mono(&now); while(eloop.timeout != NULL && !os_time_before(&now, &eloop.timeout->time)) { struct eloop_timeout *tmp; tmp = eloop.timeout; eloop.timeout = eloop.timeout->next; eloop.registered_timeout = NULL; (*tmp->handler)(tmp->eloop_data, tmp->user_data); os_free(tmp); } eloop_reschedule(); return 0; } static void eloop_reschedule(void) { if(eloop.timeout != NULL && eloop.timeout != eloop.registered_timeout) { struct os_time delay, now; long msec = 0; os_get_time_mono(&now); if(os_time_before(&now, &eloop.timeout->time)) { os_time_sub(&eloop.timeout->time, &now, &delay); msec = delay.sec * 1000; msec += delay.usec / 1000; } if(msec <= 0) msec = 1; DriverEnvironment_RegisterTimerCallback(msec, eloop.timer_id, eloop_timer, FALSE); eloop.registered_timeout = eloop.timeout; } } int eloop_register_timeout(unsigned int secs, unsigned int usecs, void (*handler)(void *eloop_ctx, void *timeout_ctx), void *eloop_data, void *user_data) { struct eloop_timeout *timeout, *tmp, **prev; timeout = (struct eloop_timeout *) os_malloc(sizeof(*timeout)); if (timeout == NULL) return -1; os_get_time_mono(&timeout->time); timeout->time.sec += secs; timeout->time.usec += usecs; while (timeout->time.usec >= 1000000) { timeout->time.sec++; timeout->time.usec -= 1000000; } timeout->eloop_data = eloop_data; timeout->user_data = user_data; timeout->handler = handler; timeout->next = NULL; prev = &eloop.timeout; tmp = eloop.timeout; while(tmp != NULL) { if(os_time_before(&timeout->time, &tmp->time)) break; prev = &tmp->next; tmp = tmp->next; } timeout->next = tmp; *prev = timeout; eloop_reschedule(); return 0; } int eloop_cancel_timeout(void (*handler)(void *eloop_ctx, void *sock_ctx), void *eloop_data, void *user_data) { struct eloop_timeout *timeout, *prev, 
*next; int removed = 0; prev = NULL; timeout = eloop.timeout; while (timeout != NULL) { next = timeout->next; if (timeout->handler == handler && (timeout->eloop_data == eloop_data || eloop_data == ELOOP_ALL_CTX) && (timeout->user_data == user_data || user_data == ELOOP_ALL_CTX)) { if (prev == NULL) eloop.timeout = next; else prev->next = next; os_free(timeout); removed++; } else prev = timeout; timeout = next; } eloop_reschedule(); return removed; } /* TODO: replace with suitable signal handler */ #if 0 static void eloop_handle_signal(int sig) { int i; eloop.signaled++; for (i = 0; i < eloop.signal_count; i++) { if (eloop.signals[i].sig == sig) { eloop.signals[i].signaled++; break; } } } #endif static void eloop_process_pending_signals(void) { int i; if (eloop.signaled == 0) return; eloop.signaled = 0; if (eloop.pending_terminate) { eloop.pending_terminate = 0; } for (i = 0; i < eloop.signal_count; i++) { if (eloop.signals[i].signaled) { eloop.signals[i].signaled = 0; eloop.signals[i].handler(eloop.signals[i].sig, eloop.user_data, eloop.signals[i].user_data); } } } int eloop_register_signal(int sig, void (*handler)(int sig, void *eloop_ctx, void *signal_ctx), void *user_data) { struct eloop_signal *tmp; tmp = (struct eloop_signal *) os_realloc(eloop.signals, (eloop.signal_count + 1) * sizeof(struct eloop_signal)); if (tmp == NULL) return -1; tmp[eloop.signal_count].sig = sig; tmp[eloop.signal_count].user_data = user_data; tmp[eloop.signal_count].handler = handler; tmp[eloop.signal_count].signaled = 0; eloop.signal_count++; eloop.signals = tmp; /* TODO: register signal handler */ return 0; } int eloop_register_signal_terminate(void (*handler)(int sig, void *eloop_ctx, void *signal_ctx), void *user_data) { #if 0 /* TODO: for example */ int ret = eloop_register_signal(SIGINT, handler, user_data); if (ret == 0) ret = eloop_register_signal(SIGTERM, handler, user_data); return ret; #endif return 0; } int eloop_register_signal_reconfig(void (*handler)(int sig, void 
*eloop_ctx, void *signal_ctx), void *user_data) { #if 0 /* TODO: for example */ return eloop_register_signal(SIGHUP, handler, user_data); #endif return 0; } void eloop_run(void) { int i; struct os_time tv, now; while (!eloop.terminate && (eloop.timeout || eloop.reader_count > 0)) { if (eloop.timeout) { os_get_time_mono(&now); if (os_time_before(&now, &eloop.timeout->time)) os_time_sub(&eloop.timeout->time, &now, &tv); else tv.sec = tv.usec = 0; } /* * TODO: wait for any event (read socket ready, timeout (tv), * signal */ os_sleep(1, 0); /* just a dummy wait for testing */ eloop_process_pending_signals(); /* check if some registered timeouts have occurred */ if (eloop.timeout) { struct eloop_timeout *tmp; os_get_time_mono(&now); if (!os_time_before(&now, &eloop.timeout->time)) { tmp = eloop.timeout; eloop.timeout = eloop.timeout->next; tmp->handler(tmp->eloop_data, tmp->user_data); os_free(tmp); } } eloop.reader_table_changed = 0; for (i = 0; i < eloop.reader_count; i++) { /* * TODO: call each handler that has pending data to * read */ if (0 /* TODO: eloop.readers[i].sock ready */) { eloop.readers[i].handler( eloop.readers[i].sock, eloop.readers[i].eloop_data, eloop.readers[i].user_data); if (eloop.reader_table_changed) break; } } } } void eloop_terminate(void) { eloop.terminate = 1; } void eloop_destroy(void) { struct eloop_timeout *timeout, *prev; timeout = eloop.timeout; while (timeout != NULL) { prev = timeout; timeout = timeout->next; os_free(prev); } DriverEnvironment_FreeTimer(eloop.timer_id); os_free(eloop.readers); os_free(eloop.signals); } int eloop_terminated(void) { return eloop.terminate; } void eloop_wait_for_read_sock(int sock) { /* * TODO: wait for the file descriptor to have something available for * reading */ } void * eloop_get_user_data(void) { return eloop.user_data; } /* Local Variables: */ /* c-basic-offset: 8 */ /* indent-tabs-mode: t */ /* End: */
gpl-2.0
georgejhunt/olpc-kernel
arch/arm/plat-omap/dma.c
356
35210
/* * linux/arch/arm/plat-omap/dma.c * * Copyright (C) 2003 - 2008 Nokia Corporation * Author: Juha Yrjölä <juha.yrjola@nokia.com> * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> * Graphics DMA and LCD DMA graphics tranformations * by Imre Deak <imre.deak@nokia.com> * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc. * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com> * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. * * Copyright (C) 2009 Texas Instruments * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> * * Support functions for the OMAP internal DMA channels. * * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ * Converted DMA library into DMA platform driver. * - G, Manjunath Kondaiah <manjugk@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/omap-dma.h> /* * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA * channels that an instance of the SDMA IP block can support. Used * to size arrays. (The actual maximum on a particular SoC may be less * than this -- for example, OMAP1 SDMA instances only support 17 logical * DMA channels.) 
*/ #define MAX_LOGICAL_DMA_CH_COUNT 32 #undef DEBUG #ifndef CONFIG_ARCH_OMAP1 enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED, DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED }; enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED }; #endif #define OMAP_DMA_ACTIVE 0x01 #define OMAP2_DMA_CSR_CLEAR_MASK 0xffffffff #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec) static struct omap_system_dma_plat_info *p; static struct omap_dma_dev_attr *d; static void omap_clear_dma(int lch); static int omap_dma_set_prio_lch(int lch, unsigned char read_prio, unsigned char write_prio); static int enable_1510_mode; static u32 errata; static struct omap_dma_global_context_registers { u32 dma_irqenable_l0; u32 dma_irqenable_l1; u32 dma_ocp_sysconfig; u32 dma_gcr; } omap_dma_global_context; struct dma_link_info { int *linked_dmach_q; int no_of_lchs_linked; int q_count; int q_tail; int q_head; int chain_state; int chain_mode; }; static struct dma_link_info *dma_linked_lch; #ifndef CONFIG_ARCH_OMAP1 /* Chain handling macros */ #define OMAP_DMA_CHAIN_QINIT(chain_id) \ do { \ dma_linked_lch[chain_id].q_head = \ dma_linked_lch[chain_id].q_tail = \ dma_linked_lch[chain_id].q_count = 0; \ } while (0) #define OMAP_DMA_CHAIN_QFULL(chain_id) \ (dma_linked_lch[chain_id].no_of_lchs_linked == \ dma_linked_lch[chain_id].q_count) #define OMAP_DMA_CHAIN_QLAST(chain_id) \ do { \ ((dma_linked_lch[chain_id].no_of_lchs_linked-1) == \ dma_linked_lch[chain_id].q_count) \ } while (0) #define OMAP_DMA_CHAIN_QEMPTY(chain_id) \ (0 == dma_linked_lch[chain_id].q_count) #define __OMAP_DMA_CHAIN_INCQ(end) \ ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked) #define OMAP_DMA_CHAIN_INCQHEAD(chain_id) \ do { \ __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \ dma_linked_lch[chain_id].q_count--; \ } while (0) #define OMAP_DMA_CHAIN_INCQTAIL(chain_id) \ do { \ __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \ dma_linked_lch[chain_id].q_count++; \ } while (0) 
#endif static int dma_lch_count; static int dma_chan_count; static int omap_dma_reserve_channels; static spinlock_t dma_chan_lock; static struct omap_dma_lch *dma_chan; static inline void disable_lnk(int lch); static void omap_disable_channel_irq(int lch); static inline void omap_enable_channel_irq(int lch); #define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \ __func__); #ifdef CONFIG_ARCH_OMAP15XX /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */ static int omap_dma_in_1510_mode(void) { return enable_1510_mode; } #else #define omap_dma_in_1510_mode() 0 #endif #ifdef CONFIG_ARCH_OMAP1 static inline void set_gdma_dev(int req, int dev) { u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4; int shift = ((req - 1) % 5) * 6; u32 l; l = omap_readl(reg); l &= ~(0x3f << shift); l |= (dev - 1) << shift; omap_writel(l, reg); } #else #define set_gdma_dev(req, dev) do {} while (0) #define omap_readl(reg) 0 #define omap_writel(val, reg) do {} while (0) #endif #ifdef CONFIG_ARCH_OMAP1 void omap_set_dma_priority(int lch, int dst_port, int priority) { unsigned long reg; u32 l; if (dma_omap1()) { switch (dst_port) { case OMAP_DMA_PORT_OCP_T1: /* FFFECC00 */ reg = OMAP_TC_OCPT1_PRIOR; break; case OMAP_DMA_PORT_OCP_T2: /* FFFECCD0 */ reg = OMAP_TC_OCPT2_PRIOR; break; case OMAP_DMA_PORT_EMIFF: /* FFFECC08 */ reg = OMAP_TC_EMIFF_PRIOR; break; case OMAP_DMA_PORT_EMIFS: /* FFFECC04 */ reg = OMAP_TC_EMIFS_PRIOR; break; default: BUG(); return; } l = omap_readl(reg); l &= ~(0xf << 8); l |= (priority & 0xf) << 8; omap_writel(l, reg); } } #endif #ifdef CONFIG_ARCH_OMAP2PLUS void omap_set_dma_priority(int lch, int dst_port, int priority) { u32 ccr; ccr = p->dma_read(CCR, lch); if (priority) ccr |= (1 << 6); else ccr &= ~(1 << 6); p->dma_write(ccr, CCR, lch); } #endif EXPORT_SYMBOL(omap_set_dma_priority); void omap_set_dma_transfer_params(int lch, int data_type, int elem_count, int frame_count, int sync_mode, int dma_trigger, int src_or_dst_synch) { 
u32 l; l = p->dma_read(CSDP, lch); l &= ~0x03; l |= data_type; p->dma_write(l, CSDP, lch); if (dma_omap1()) { u16 ccr; ccr = p->dma_read(CCR, lch); ccr &= ~(1 << 5); if (sync_mode == OMAP_DMA_SYNC_FRAME) ccr |= 1 << 5; p->dma_write(ccr, CCR, lch); ccr = p->dma_read(CCR2, lch); ccr &= ~(1 << 2); if (sync_mode == OMAP_DMA_SYNC_BLOCK) ccr |= 1 << 2; p->dma_write(ccr, CCR2, lch); } if (dma_omap2plus() && dma_trigger) { u32 val; val = p->dma_read(CCR, lch); /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */ val &= ~((1 << 23) | (3 << 19) | 0x1f); val |= (dma_trigger & ~0x1f) << 14; val |= dma_trigger & 0x1f; if (sync_mode & OMAP_DMA_SYNC_FRAME) val |= 1 << 5; else val &= ~(1 << 5); if (sync_mode & OMAP_DMA_SYNC_BLOCK) val |= 1 << 18; else val &= ~(1 << 18); if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) { val &= ~(1 << 24); /* dest synch */ val |= (1 << 23); /* Prefetch */ } else if (src_or_dst_synch) { val |= 1 << 24; /* source synch */ } else { val &= ~(1 << 24); /* dest synch */ } p->dma_write(val, CCR, lch); } p->dma_write(elem_count, CEN, lch); p->dma_write(frame_count, CFN, lch); } EXPORT_SYMBOL(omap_set_dma_transfer_params); void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode) { if (dma_omap2plus()) { u32 csdp; csdp = p->dma_read(CSDP, lch); csdp &= ~(0x3 << 16); csdp |= (mode << 16); p->dma_write(csdp, CSDP, lch); } } EXPORT_SYMBOL(omap_set_dma_write_mode); void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode) { if (dma_omap1() && !dma_omap15xx()) { u32 l; l = p->dma_read(LCH_CTRL, lch); l &= ~0x7; l |= mode; p->dma_write(l, LCH_CTRL, lch); } } EXPORT_SYMBOL(omap_set_dma_channel_mode); /* Note that src_port is only for omap1 */ void omap_set_dma_src_params(int lch, int src_port, int src_amode, unsigned long src_start, int src_ei, int src_fi) { u32 l; if (dma_omap1()) { u16 w; w = p->dma_read(CSDP, lch); w &= ~(0x1f << 2); w |= src_port << 2; p->dma_write(w, CSDP, lch); } l = p->dma_read(CCR, lch); l &= ~(0x03 << 
12); l |= src_amode << 12; p->dma_write(l, CCR, lch); p->dma_write(src_start, CSSA, lch); p->dma_write(src_ei, CSEI, lch); p->dma_write(src_fi, CSFI, lch); } EXPORT_SYMBOL(omap_set_dma_src_params); void omap_set_dma_params(int lch, struct omap_dma_channel_params *params) { omap_set_dma_transfer_params(lch, params->data_type, params->elem_count, params->frame_count, params->sync_mode, params->trigger, params->src_or_dst_synch); omap_set_dma_src_params(lch, params->src_port, params->src_amode, params->src_start, params->src_ei, params->src_fi); omap_set_dma_dest_params(lch, params->dst_port, params->dst_amode, params->dst_start, params->dst_ei, params->dst_fi); if (params->read_prio || params->write_prio) omap_dma_set_prio_lch(lch, params->read_prio, params->write_prio); } EXPORT_SYMBOL(omap_set_dma_params); void omap_set_dma_src_data_pack(int lch, int enable) { u32 l; l = p->dma_read(CSDP, lch); l &= ~(1 << 6); if (enable) l |= (1 << 6); p->dma_write(l, CSDP, lch); } EXPORT_SYMBOL(omap_set_dma_src_data_pack); void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) { unsigned int burst = 0; u32 l; l = p->dma_read(CSDP, lch); l &= ~(0x03 << 7); switch (burst_mode) { case OMAP_DMA_DATA_BURST_DIS: break; case OMAP_DMA_DATA_BURST_4: if (dma_omap2plus()) burst = 0x1; else burst = 0x2; break; case OMAP_DMA_DATA_BURST_8: if (dma_omap2plus()) { burst = 0x2; break; } /* * not supported by current hardware on OMAP1 * w |= (0x03 << 7); * fall through */ case OMAP_DMA_DATA_BURST_16: if (dma_omap2plus()) { burst = 0x3; break; } /* * OMAP1 don't support burst 16 * fall through */ default: BUG(); } l |= (burst << 7); p->dma_write(l, CSDP, lch); } EXPORT_SYMBOL(omap_set_dma_src_burst_mode); /* Note that dest_port is only for OMAP1 */ void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode, unsigned long dest_start, int dst_ei, int dst_fi) { u32 l; if (dma_omap1()) { l = p->dma_read(CSDP, lch); l &= ~(0x1f << 9); l |= dest_port << 9; 
p->dma_write(l, CSDP, lch); } l = p->dma_read(CCR, lch); l &= ~(0x03 << 14); l |= dest_amode << 14; p->dma_write(l, CCR, lch); p->dma_write(dest_start, CDSA, lch); p->dma_write(dst_ei, CDEI, lch); p->dma_write(dst_fi, CDFI, lch); } EXPORT_SYMBOL(omap_set_dma_dest_params); void omap_set_dma_dest_data_pack(int lch, int enable) { u32 l; l = p->dma_read(CSDP, lch); l &= ~(1 << 13); if (enable) l |= 1 << 13; p->dma_write(l, CSDP, lch); } EXPORT_SYMBOL(omap_set_dma_dest_data_pack); void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) { unsigned int burst = 0; u32 l; l = p->dma_read(CSDP, lch); l &= ~(0x03 << 14); switch (burst_mode) { case OMAP_DMA_DATA_BURST_DIS: break; case OMAP_DMA_DATA_BURST_4: if (dma_omap2plus()) burst = 0x1; else burst = 0x2; break; case OMAP_DMA_DATA_BURST_8: if (dma_omap2plus()) burst = 0x2; else burst = 0x3; break; case OMAP_DMA_DATA_BURST_16: if (dma_omap2plus()) { burst = 0x3; break; } /* * OMAP1 don't support burst 16 * fall through */ default: printk(KERN_ERR "Invalid DMA burst mode\n"); BUG(); return; } l |= (burst << 14); p->dma_write(l, CSDP, lch); } EXPORT_SYMBOL(omap_set_dma_dest_burst_mode); static inline void omap_enable_channel_irq(int lch) { /* Clear CSR */ if (dma_omap1()) p->dma_read(CSR, lch); else p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch); /* Enable some nice interrupts. 
*/ p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch); } static inline void omap_disable_channel_irq(int lch) { /* disable channel interrupts */ p->dma_write(0, CICR, lch); /* Clear CSR */ if (dma_omap1()) p->dma_read(CSR, lch); else p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch); } void omap_enable_dma_irq(int lch, u16 bits) { dma_chan[lch].enabled_irqs |= bits; } EXPORT_SYMBOL(omap_enable_dma_irq); void omap_disable_dma_irq(int lch, u16 bits) { dma_chan[lch].enabled_irqs &= ~bits; } EXPORT_SYMBOL(omap_disable_dma_irq); static inline void enable_lnk(int lch) { u32 l; l = p->dma_read(CLNK_CTRL, lch); if (dma_omap1()) l &= ~(1 << 14); /* Set the ENABLE_LNK bits */ if (dma_chan[lch].next_lch != -1) l = dma_chan[lch].next_lch | (1 << 15); #ifndef CONFIG_ARCH_OMAP1 if (dma_omap2plus()) if (dma_chan[lch].next_linked_ch != -1) l = dma_chan[lch].next_linked_ch | (1 << 15); #endif p->dma_write(l, CLNK_CTRL, lch); } static inline void disable_lnk(int lch) { u32 l; l = p->dma_read(CLNK_CTRL, lch); /* Disable interrupts */ omap_disable_channel_irq(lch); if (dma_omap1()) { /* Set the STOP_LNK bit */ l |= 1 << 14; } if (dma_omap2plus()) { /* Clear the ENABLE_LNK bit */ l &= ~(1 << 15); } p->dma_write(l, CLNK_CTRL, lch); dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; } static inline void omap2_enable_irq_lch(int lch) { u32 val; unsigned long flags; if (dma_omap1()) return; spin_lock_irqsave(&dma_chan_lock, flags); /* clear IRQ STATUS */ p->dma_write(1 << lch, IRQSTATUS_L0, lch); /* Enable interrupt */ val = p->dma_read(IRQENABLE_L0, lch); val |= 1 << lch; p->dma_write(val, IRQENABLE_L0, lch); spin_unlock_irqrestore(&dma_chan_lock, flags); } static inline void omap2_disable_irq_lch(int lch) { u32 val; unsigned long flags; if (dma_omap1()) return; spin_lock_irqsave(&dma_chan_lock, flags); /* Disable interrupt */ val = p->dma_read(IRQENABLE_L0, lch); val &= ~(1 << lch); p->dma_write(val, IRQENABLE_L0, lch); /* clear IRQ STATUS */ p->dma_write(1 << lch, IRQSTATUS_L0, lch); 
spin_unlock_irqrestore(&dma_chan_lock, flags); } int omap_request_dma(int dev_id, const char *dev_name, void (*callback)(int lch, u16 ch_status, void *data), void *data, int *dma_ch_out) { int ch, free_ch = -1; unsigned long flags; struct omap_dma_lch *chan; WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine"); spin_lock_irqsave(&dma_chan_lock, flags); for (ch = 0; ch < dma_chan_count; ch++) { if (free_ch == -1 && dma_chan[ch].dev_id == -1) { free_ch = ch; /* Exit after first free channel found */ break; } } if (free_ch == -1) { spin_unlock_irqrestore(&dma_chan_lock, flags); return -EBUSY; } chan = dma_chan + free_ch; chan->dev_id = dev_id; if (p->clear_lch_regs) p->clear_lch_regs(free_ch); if (dma_omap2plus()) omap_clear_dma(free_ch); spin_unlock_irqrestore(&dma_chan_lock, flags); chan->dev_name = dev_name; chan->callback = callback; chan->data = data; chan->flags = 0; #ifndef CONFIG_ARCH_OMAP1 if (dma_omap2plus()) { chan->chain_id = -1; chan->next_linked_ch = -1; } #endif chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ; if (dma_omap1()) chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ; else if (dma_omap2plus()) chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ | OMAP2_DMA_TRANS_ERR_IRQ; if (dma_omap16xx()) { /* If the sync device is set, configure it dynamically. */ if (dev_id != 0) { set_gdma_dev(free_ch + 1, dev_id); dev_id = free_ch + 1; } /* * Disable the 1510 compatibility mode and set the sync device * id. 
*/ p->dma_write(dev_id | (1 << 10), CCR, free_ch); } else if (dma_omap1()) { p->dma_write(dev_id, CCR, free_ch); } if (dma_omap2plus()) { omap_enable_channel_irq(free_ch); omap2_enable_irq_lch(free_ch); } *dma_ch_out = free_ch; return 0; } EXPORT_SYMBOL(omap_request_dma); void omap_free_dma(int lch) { unsigned long flags; if (dma_chan[lch].dev_id == -1) { pr_err("omap_dma: trying to free unallocated DMA channel %d\n", lch); return; } /* Disable interrupt for logical channel */ if (dma_omap2plus()) omap2_disable_irq_lch(lch); /* Disable all DMA interrupts for the channel. */ omap_disable_channel_irq(lch); /* Make sure the DMA transfer is stopped. */ p->dma_write(0, CCR, lch); /* Clear registers */ if (dma_omap2plus()) omap_clear_dma(lch); spin_lock_irqsave(&dma_chan_lock, flags); dma_chan[lch].dev_id = -1; dma_chan[lch].next_lch = -1; dma_chan[lch].callback = NULL; spin_unlock_irqrestore(&dma_chan_lock, flags); } EXPORT_SYMBOL(omap_free_dma); /** * @brief omap_dma_set_global_params : Set global priority settings for dma * * @param arb_rate * @param max_fifo_depth * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM * DMA_THREAD_RESERVE_ONET * DMA_THREAD_RESERVE_TWOT * DMA_THREAD_RESERVE_THREET */ void omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams) { u32 reg; if (dma_omap1()) { printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__); return; } if (max_fifo_depth == 0) max_fifo_depth = 1; if (arb_rate == 0) arb_rate = 1; reg = 0xff & max_fifo_depth; reg |= (0x3 & tparams) << 12; reg |= (arb_rate & 0xff) << 16; p->dma_write(reg, GCR, 0); } EXPORT_SYMBOL(omap_dma_set_global_params); /** * @brief omap_dma_set_prio_lch : Set channel wise priority settings * * @param lch * @param read_prio - Read priority * @param write_prio - Write priority * Both of the above can be set with one of the following values : * DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW */ static int omap_dma_set_prio_lch(int lch, unsigned char read_prio, unsigned char 
write_prio) { u32 l; if (unlikely((lch < 0 || lch >= dma_lch_count))) { printk(KERN_ERR "Invalid channel id\n"); return -EINVAL; } l = p->dma_read(CCR, lch); l &= ~((1 << 6) | (1 << 26)); if (d->dev_caps & IS_RW_PRIORITY) l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26); else l |= ((read_prio & 0x1) << 6); p->dma_write(l, CCR, lch); return 0; } /* * Clears any DMA state so the DMA engine is ready to restart with new buffers * through omap_start_dma(). Any buffers in flight are discarded. */ static void omap_clear_dma(int lch) { unsigned long flags; local_irq_save(flags); p->clear_dma(lch); local_irq_restore(flags); } void omap_start_dma(int lch) { u32 l; /* * The CPC/CDAC register needs to be initialized to zero * before starting dma transfer. */ if (dma_omap15xx()) p->dma_write(0, CPC, lch); else p->dma_write(0, CDAC, lch); if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { int next_lch, cur_lch; char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT]; /* Set the link register of the first channel */ enable_lnk(lch); memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); dma_chan_link_map[lch] = 1; cur_lch = dma_chan[lch].next_lch; do { next_lch = dma_chan[cur_lch].next_lch; /* The loop case: we've been here already */ if (dma_chan_link_map[cur_lch]) break; /* Mark the current channel */ dma_chan_link_map[cur_lch] = 1; enable_lnk(cur_lch); omap_enable_channel_irq(cur_lch); cur_lch = next_lch; } while (next_lch != -1); } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS)) p->dma_write(lch, CLNK_CTRL, lch); omap_enable_channel_irq(lch); l = p->dma_read(CCR, lch); if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING)) l |= OMAP_DMA_CCR_BUFFERING_DISABLE; l |= OMAP_DMA_CCR_EN; /* * As dma_write() uses IO accessors which are weakly ordered, there * is no guarantee that data in coherent DMA memory will be visible * to the DMA device. Add a memory barrier here to ensure that any * such data is visible prior to enabling DMA. 
*/ mb(); p->dma_write(l, CCR, lch); dma_chan[lch].flags |= OMAP_DMA_ACTIVE; } EXPORT_SYMBOL(omap_start_dma); void omap_stop_dma(int lch) { u32 l; /* Disable all interrupts on the channel */ omap_disable_channel_irq(lch); l = p->dma_read(CCR, lch); if (IS_DMA_ERRATA(DMA_ERRATA_i541) && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) { int i = 0; u32 sys_cf; /* Configure No-Standby */ l = p->dma_read(OCP_SYSCONFIG, lch); sys_cf = l; l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK; l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); p->dma_write(l , OCP_SYSCONFIG, 0); l = p->dma_read(CCR, lch); l &= ~OMAP_DMA_CCR_EN; p->dma_write(l, CCR, lch); /* Wait for sDMA FIFO drain */ l = p->dma_read(CCR, lch); while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE | OMAP_DMA_CCR_WR_ACTIVE))) { udelay(5); i++; l = p->dma_read(CCR, lch); } if (i >= 100) pr_err("DMA drain did not complete on lch %d\n", lch); /* Restore OCP_SYSCONFIG */ p->dma_write(sys_cf, OCP_SYSCONFIG, lch); } else { l &= ~OMAP_DMA_CCR_EN; p->dma_write(l, CCR, lch); } /* * Ensure that data transferred by DMA is visible to any access * after DMA has been disabled. This is important for coherent * DMA regions. */ mb(); if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { int next_lch, cur_lch = lch; char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT]; memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map)); do { /* The loop case: we've been here already */ if (dma_chan_link_map[cur_lch]) break; /* Mark the current channel */ dma_chan_link_map[cur_lch] = 1; disable_lnk(cur_lch); next_lch = dma_chan[cur_lch].next_lch; cur_lch = next_lch; } while (next_lch != -1); } dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE; } EXPORT_SYMBOL(omap_stop_dma); /* * Allows changing the DMA callback function or data. This may be needed if * the driver shares a single DMA channel for multiple dma triggers. 
*/ int omap_set_dma_callback(int lch, void (*callback)(int lch, u16 ch_status, void *data), void *data) { unsigned long flags; if (lch < 0) return -ENODEV; spin_lock_irqsave(&dma_chan_lock, flags); if (dma_chan[lch].dev_id == -1) { printk(KERN_ERR "DMA callback for not set for free channel\n"); spin_unlock_irqrestore(&dma_chan_lock, flags); return -EINVAL; } dma_chan[lch].callback = callback; dma_chan[lch].data = data; spin_unlock_irqrestore(&dma_chan_lock, flags); return 0; } EXPORT_SYMBOL(omap_set_dma_callback); /* * Returns current physical source address for the given DMA channel. * If the channel is running the caller must disable interrupts prior calling * this function and process the returned value before re-enabling interrupt to * prevent races with the interrupt handler. Note that in continuous mode there * is a chance for CSSA_L register overflow between the two reads resulting * in incorrect return value. */ dma_addr_t omap_get_dma_src_pos(int lch) { dma_addr_t offset = 0; if (dma_omap15xx()) offset = p->dma_read(CPC, lch); else offset = p->dma_read(CSAC, lch); if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0) offset = p->dma_read(CSAC, lch); if (!dma_omap15xx()) { /* * CDAC == 0 indicates that the DMA transfer on the channel has * not been started (no data has been transferred so far). * Return the programmed source start address in this case. */ if (likely(p->dma_read(CDAC, lch))) offset = p->dma_read(CSAC, lch); else offset = p->dma_read(CSSA, lch); } if (dma_omap1()) offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000); return offset; } EXPORT_SYMBOL(omap_get_dma_src_pos); /* * Returns current physical destination address for the given DMA channel. * If the channel is running the caller must disable interrupts prior calling * this function and process the returned value before re-enabling interrupt to * prevent races with the interrupt handler. 
Note that in continuous mode there * is a chance for CDSA_L register overflow between the two reads resulting * in incorrect return value. */ dma_addr_t omap_get_dma_dst_pos(int lch) { dma_addr_t offset = 0; if (dma_omap15xx()) offset = p->dma_read(CPC, lch); else offset = p->dma_read(CDAC, lch); /* * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is * read before the DMA controller finished disabling the channel. */ if (!dma_omap15xx() && offset == 0) { offset = p->dma_read(CDAC, lch); /* * CDAC == 0 indicates that the DMA transfer on the channel has * not been started (no data has been transferred so far). * Return the programmed destination start address in this case. */ if (unlikely(!offset)) offset = p->dma_read(CDSA, lch); } if (dma_omap1()) offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000); return offset; } EXPORT_SYMBOL(omap_get_dma_dst_pos); int omap_get_dma_active_status(int lch) { return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0; } EXPORT_SYMBOL(omap_get_dma_active_status); int omap_dma_running(void) { int lch; if (dma_omap1()) if (omap_lcd_dma_running()) return 1; for (lch = 0; lch < dma_chan_count; lch++) if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) return 1; return 0; } /* * lch_queue DMA will start right after lch_head one is finished. * For this DMA link to start, you still need to start (see omap_start_dma) * the first one. That will fire up the entire queue. 
*/ void omap_dma_link_lch(int lch_head, int lch_queue) { if (omap_dma_in_1510_mode()) { if (lch_head == lch_queue) { p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8), CCR, lch_head); return; } printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); BUG(); return; } if ((dma_chan[lch_head].dev_id == -1) || (dma_chan[lch_queue].dev_id == -1)) { pr_err("omap_dma: trying to link non requested channels\n"); dump_stack(); } dma_chan[lch_head].next_lch = lch_queue; } EXPORT_SYMBOL(omap_dma_link_lch); /*----------------------------------------------------------------------------*/ #ifdef CONFIG_ARCH_OMAP1 static int omap1_dma_handle_ch(int ch) { u32 csr; if (enable_1510_mode && ch >= 6) { csr = dma_chan[ch].saved_csr; dma_chan[ch].saved_csr = 0; } else csr = p->dma_read(CSR, ch); if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) { dma_chan[ch + 6].saved_csr = csr >> 7; csr &= 0x7f; } if ((csr & 0x3f) == 0) return 0; if (unlikely(dma_chan[ch].dev_id == -1)) { pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n", ch, csr); return 0; } if (unlikely(csr & OMAP1_DMA_TOUT_IRQ)) pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id); if (unlikely(csr & OMAP_DMA_DROP_IRQ)) pr_warn("DMA synchronization event drop occurred with device %d\n", dma_chan[ch].dev_id); if (likely(csr & OMAP_DMA_BLOCK_IRQ)) dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; if (likely(dma_chan[ch].callback != NULL)) dma_chan[ch].callback(ch, csr, dma_chan[ch].data); return 1; } static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id) { int ch = ((int) dev_id) - 1; int handled = 0; for (;;) { int handled_now = 0; handled_now += omap1_dma_handle_ch(ch); if (enable_1510_mode && dma_chan[ch + 6].saved_csr) handled_now += omap1_dma_handle_ch(ch + 6); if (!handled_now) break; handled += handled_now; } return handled ? 
IRQ_HANDLED : IRQ_NONE; } #else #define omap1_dma_irq_handler NULL #endif #ifdef CONFIG_ARCH_OMAP2PLUS static int omap2_dma_handle_ch(int ch) { u32 status = p->dma_read(CSR, ch); if (!status) { if (printk_ratelimit()) pr_warn("Spurious DMA IRQ for lch %d\n", ch); p->dma_write(1 << ch, IRQSTATUS_L0, ch); return 0; } if (unlikely(dma_chan[ch].dev_id == -1)) { if (printk_ratelimit()) pr_warn("IRQ %04x for non-allocated DMA channel %d\n", status, ch); return 0; } if (unlikely(status & OMAP_DMA_DROP_IRQ)) pr_info("DMA synchronization event drop occurred with device %d\n", dma_chan[ch].dev_id); if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) { printk(KERN_INFO "DMA transaction error with device %d\n", dma_chan[ch].dev_id); if (IS_DMA_ERRATA(DMA_ERRATA_i378)) { u32 ccr; ccr = p->dma_read(CCR, ch); ccr &= ~OMAP_DMA_CCR_EN; p->dma_write(ccr, CCR, ch); dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE; } } if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ)) printk(KERN_INFO "DMA secure error with device %d\n", dma_chan[ch].dev_id); if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ)) printk(KERN_INFO "DMA misaligned error with device %d\n", dma_chan[ch].dev_id); p->dma_write(status, CSR, ch); p->dma_write(1 << ch, IRQSTATUS_L0, ch); /* read back the register to flush the write */ p->dma_read(IRQSTATUS_L0, ch); /* If the ch is not chained then chain_id will be -1 */ if (dma_chan[ch].chain_id != -1) { int chain_id = dma_chan[ch].chain_id; dma_chan[ch].state = DMA_CH_NOTSTARTED; if (p->dma_read(CLNK_CTRL, ch) & (1 << 15)) dma_chan[dma_chan[ch].next_linked_ch].state = DMA_CH_STARTED; if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) disable_lnk(ch); if (!OMAP_DMA_CHAIN_QEMPTY(chain_id)) OMAP_DMA_CHAIN_INCQHEAD(chain_id); status = p->dma_read(CSR, ch); p->dma_write(status, CSR, ch); } if (likely(dma_chan[ch].callback != NULL)) dma_chan[ch].callback(ch, status, dma_chan[ch].data); return 0; } /* STATUS register count is from 1-32 while our is 0-31 */ static irqreturn_t 
omap2_dma_irq_handler(int irq, void *dev_id) { u32 val, enable_reg; int i; val = p->dma_read(IRQSTATUS_L0, 0); if (val == 0) { if (printk_ratelimit()) printk(KERN_WARNING "Spurious DMA IRQ\n"); return IRQ_HANDLED; } enable_reg = p->dma_read(IRQENABLE_L0, 0); val &= enable_reg; /* Dispatch only relevant interrupts */ for (i = 0; i < dma_lch_count && val != 0; i++) { if (val & 1) omap2_dma_handle_ch(i); val >>= 1; } return IRQ_HANDLED; } static struct irqaction omap24xx_dma_irq = { .name = "DMA", .handler = omap2_dma_irq_handler, }; #else static struct irqaction omap24xx_dma_irq; #endif /*----------------------------------------------------------------------------*/ /* * Note that we are currently using only IRQENABLE_L0 and L1. * As the DSP may be using IRQENABLE_L2 and L3, let's not * touch those for now. */ void omap_dma_global_context_save(void) { omap_dma_global_context.dma_irqenable_l0 = p->dma_read(IRQENABLE_L0, 0); omap_dma_global_context.dma_irqenable_l1 = p->dma_read(IRQENABLE_L1, 0); omap_dma_global_context.dma_ocp_sysconfig = p->dma_read(OCP_SYSCONFIG, 0); omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0); } void omap_dma_global_context_restore(void) { int ch; p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0); p->dma_write(omap_dma_global_context.dma_ocp_sysconfig, OCP_SYSCONFIG, 0); p->dma_write(omap_dma_global_context.dma_irqenable_l0, IRQENABLE_L0, 0); p->dma_write(omap_dma_global_context.dma_irqenable_l1, IRQENABLE_L1, 0); if (IS_DMA_ERRATA(DMA_ROMCODE_BUG)) p->dma_write(0x3 , IRQSTATUS_L0, 0); for (ch = 0; ch < dma_chan_count; ch++) if (dma_chan[ch].dev_id != -1) omap_clear_dma(ch); } struct omap_system_dma_plat_info *omap_get_plat_info(void) { return p; } EXPORT_SYMBOL_GPL(omap_get_plat_info); static int omap_system_dma_probe(struct platform_device *pdev) { int ch, ret = 0; int dma_irq; char irq_name[4]; int irq_rel; p = pdev->dev.platform_data; if (!p) { dev_err(&pdev->dev, "%s: System DMA initialized without platform data\n", __func__); 
		return -EINVAL;
	}

	d = p->dma_attr;
	errata = p->errata;

	/* Honour the "omap_dma_reserve_ch=" boot parameter, but never
	 * advertise more logical channels than the hardware provides. */
	if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
			&& (omap_dma_reserve_channels < d->lch_count))
		d->lch_count = omap_dma_reserve_channels;

	dma_lch_count = d->lch_count;
	dma_chan_count = dma_lch_count;
	enable_1510_mode = d->dev_caps & ENABLE_1510_MODE;

	dma_chan = devm_kcalloc(&pdev->dev, dma_lch_count,
				sizeof(struct omap_dma_lch), GFP_KERNEL);
	if (!dma_chan) {
		/* NOTE(review): message says "kzalloc" but the allocator is
		 * devm_kcalloc(); allocation-failure prints are redundant
		 * anyway since the allocator already warns. */
		dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
		return -ENOMEM;
	}

	if (dma_omap2plus()) {
		dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
						dma_lch_count, GFP_KERNEL);
		if (!dma_linked_lch) {
			ret = -ENOMEM;
			goto exit_dma_lch_fail;
		}
	}

	spin_lock_init(&dma_chan_lock);
	for (ch = 0; ch < dma_chan_count; ch++) {
		omap_clear_dma(ch);
		if (dma_omap2plus())
			omap2_disable_irq_lch(ch);

		dma_chan[ch].dev_id = -1;
		dma_chan[ch].next_lch = -1;

		/* 1510 mode only exposes DMA channels 0-5. */
		if (ch >= 6 && enable_1510_mode)
			continue;

		if (dma_omap1()) {
			/*
			 * request_irq() doesn't like dev_id (ie. ch) being
			 * zero, so we have to kludge around this.
			 */
			sprintf(&irq_name[0], "%d", ch);
			dma_irq = platform_get_irq_byname(pdev, irq_name);
			if (dma_irq < 0) {
				ret = dma_irq;
				goto exit_dma_irq_fail;
			}

			/* INT_DMA_LCD is handled in lcd_dma.c */
			if (dma_irq == INT_DMA_LCD)
				continue;

			ret = request_irq(dma_irq, omap1_dma_irq_handler, 0,
					"DMA", (void *) (ch + 1));
			if (ret != 0)
				goto exit_dma_irq_fail;
		}
	}

	if (d->dev_caps & IS_RW_PRIORITY)
		omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
				DMA_DEFAULT_FIFO_DEPTH, 0);

	/* On OMAP2+ a single named IRQ ("0") serves all channels, unless
	 * the dmaengine driver already owns the interrupt. */
	if (dma_omap2plus() && !(d->dev_caps & DMA_ENGINE_HANDLE_IRQ)) {
		strcpy(irq_name, "0");
		dma_irq = platform_get_irq_byname(pdev, irq_name);
		if (dma_irq < 0) {
			dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
			ret = dma_irq;
			goto exit_dma_lch_fail;
		}
		ret = setup_irq(dma_irq, &omap24xx_dma_irq);
		if (ret) {
			dev_err(&pdev->dev, "set_up failed for IRQ %d for DMA (error %d)\n",
				dma_irq, ret);
			goto exit_dma_lch_fail;
		}
	}

	/* reserve dma channels 0 and 1 in high security devices on 34xx */
	if (d->dev_caps & HS_CHANNELS_RESERVED) {
		pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
		dma_chan[0].dev_id = 0;
		dma_chan[1].dev_id = 1;
	}
	p->show_dma_caps();
	return 0;

exit_dma_irq_fail:
	dev_err(&pdev->dev, "unable to request IRQ %d for DMA (error %d)\n",
		dma_irq, ret);
	/* Undo the OMAP1 per-channel request_irq() calls made so far. */
	for (irq_rel = 0; irq_rel < ch; irq_rel++) {
		dma_irq = platform_get_irq(pdev, irq_rel);
		free_irq(dma_irq, (void *)(irq_rel + 1));
	}

exit_dma_lch_fail:
	/* NOTE(review): dma_linked_lch does not appear to be freed on the
	 * OMAP2+ setup_irq() failure path - possible leak; confirm against
	 * the full probe body (its start is outside this chunk). */
	return ret;
}

/* Undo the IRQ bindings made in probe: the shared OMAP2+ IRQ, or the
 * per-channel OMAP1 IRQs. */
static int omap_system_dma_remove(struct platform_device *pdev)
{
	int dma_irq;

	if (dma_omap2plus()) {
		char irq_name[4];
		strcpy(irq_name, "0");
		dma_irq = platform_get_irq_byname(pdev, irq_name);
		if (dma_irq >= 0)
			remove_irq(dma_irq, &omap24xx_dma_irq);
	} else {
		int irq_rel = 0;
		for ( ; irq_rel < dma_chan_count; irq_rel++) {
			dma_irq = platform_get_irq(pdev, irq_rel);
			free_irq(dma_irq, (void *)(irq_rel + 1));
		}
	}
	return 0;
}

static struct platform_driver omap_system_dma_driver = {
	.probe		= omap_system_dma_probe,
	.remove		= omap_system_dma_remove,
	.driver		= {
		.name	= "omap_dma_system"
	},
};

static int __init omap_system_dma_init(void)
{
	return platform_driver_register(&omap_system_dma_driver);
}
/* arch_initcall: DMA must be available before most other drivers probe. */
arch_initcall(omap_system_dma_init);

static void __exit omap_system_dma_exit(void)
{
	platform_driver_unregister(&omap_system_dma_driver);
}

MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");

/*
 * Reserve the omap SDMA channels using cmdline bootarg
 * "omap_dma_reserve_ch=". The valid range is 1 to 32
 */
static int __init omap_dma_cmdline_reserve_ch(char *str)
{
	/* get_option() returns 1 when a plain integer was parsed;
	 * anything else disables the reservation. */
	if (get_option(&str, &omap_dma_reserve_channels) != 1)
		omap_dma_reserve_channels = 0;
	return 1;
}

__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
gpl-2.0
earlish/linux-fslc
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
612
73610
/* bnx2fc_fcoe.c: QLogic NetXtreme II Linux FCoE offload driver. * This file contains the code that interacts with libfc, libfcoe, * cnic modules to create FCoE instances, send/receive non-offloaded * FIP/FCoE packets, listen to link events etc. * * Copyright (c) 2008 - 2013 Broadcom Corporation * Copyright (c) 2014, QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) */ #include "bnx2fc.h" static struct list_head adapter_list; static struct list_head if_list; static u32 adapter_count; static DEFINE_MUTEX(bnx2fc_dev_lock); DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION #define DRV_MODULE_RELDATE "Dec 11, 2013" static char version[] = "QLogic NetXtreme II FCoE Driver " DRV_MODULE_NAME \ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>"); MODULE_DESCRIPTION("QLogic NetXtreme II BCM57710 FCoE Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); #define BNX2FC_MAX_QUEUE_DEPTH 256 #define BNX2FC_MIN_QUEUE_DEPTH 32 #define FCOE_WORD_TO_BYTE 4 static struct scsi_transport_template *bnx2fc_transport_template; static struct scsi_transport_template *bnx2fc_vport_xport_template; struct workqueue_struct *bnx2fc_wq; /* bnx2fc structure needs only one instance of the fcoe_percpu_s structure. 
* Here the io threads are per cpu but the l2 thread is just one */ struct fcoe_percpu_s bnx2fc_global; DEFINE_SPINLOCK(bnx2fc_global_lock); static struct cnic_ulp_ops bnx2fc_cnic_cb; static struct libfc_function_template bnx2fc_libfc_fcn_templ; static struct scsi_host_template bnx2fc_shost_template; static struct fc_function_template bnx2fc_transport_function; static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ; static struct fc_function_template bnx2fc_vport_xport_function; static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); static void __bnx2fc_destroy(struct bnx2fc_interface *interface); static int bnx2fc_destroy(struct net_device *net_device); static int bnx2fc_enable(struct net_device *netdev); static int bnx2fc_disable(struct net_device *netdev); /* fcoe_syfs control interface handlers */ static int bnx2fc_ctlr_alloc(struct net_device *netdev); static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev); static void bnx2fc_recv_frame(struct sk_buff *skb); static void bnx2fc_start_disc(struct bnx2fc_interface *interface); static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); static int bnx2fc_lport_config(struct fc_lport *lport); static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba); static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv); static void bnx2fc_destroy_work(struct work_struct *work); static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device *phys_dev); static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface); static struct bnx2fc_hba 
*bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); static int bnx2fc_fw_init(struct bnx2fc_hba *hba); static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); static void bnx2fc_port_shutdown(struct fc_lport *lport); static void bnx2fc_stop(struct bnx2fc_interface *interface); static int __init bnx2fc_mod_init(void); static void __exit bnx2fc_mod_exit(void); unsigned int bnx2fc_debug_level; module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); static int bnx2fc_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu); /* notification function for CPU hotplug events */ static struct notifier_block bnx2fc_cpu_notifier = { .notifier_call = bnx2fc_cpu_callback, }; static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport) { return ((struct bnx2fc_interface *) ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; } static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) { struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev); struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr); fcf_dev->vlan_id = fcoe->vlan_id; } static void bnx2fc_clean_rx_queue(struct fc_lport *lp) { struct fcoe_percpu_s *bg; struct fcoe_rcv_info *fr; struct sk_buff_head *list; struct sk_buff *skb, *next; struct sk_buff *head; bg = &bnx2fc_global; spin_lock_bh(&bg->fcoe_rx_list.lock); list = &bg->fcoe_rx_list; head = list->next; for (skb = head; skb != (struct sk_buff *)list; skb = next) { next = skb->next; fr = fcoe_dev_from_skb(skb); if (fr->fr_dev == lp) { __skb_unlink(skb, list); kfree_skb(skb); } } spin_unlock_bh(&bg->fcoe_rx_list.lock); } int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen) { int rc; spin_lock(&bnx2fc_global_lock); rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global); spin_unlock(&bnx2fc_global_lock); return rc; } static void bnx2fc_abort_io(struct fc_lport *lport) { /* * This function is no-op for bnx2fc, but we do * not want 
to leave it as NULL either, as libfc * can call the default function which is * fc_fcp_abort_io. */ } static void bnx2fc_cleanup(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct bnx2fc_rport *tgt; int i; BNX2FC_MISC_DBG("Entered %s\n", __func__); mutex_lock(&hba->hba_mutex); spin_lock_bh(&hba->hba_lock); for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { tgt = hba->tgt_ofld_list[i]; if (tgt) { /* Cleanup IOs belonging to requested vport */ if (tgt->port == port) { spin_unlock_bh(&hba->hba_lock); BNX2FC_TGT_DBG(tgt, "flush/cleanup\n"); bnx2fc_flush_active_ios(tgt); spin_lock_bh(&hba->hba_lock); } } } spin_unlock_bh(&hba->hba_lock); mutex_unlock(&hba->hba_mutex); } static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt, struct fc_frame *fp) { struct fc_rport_priv *rdata = tgt->rdata; struct fc_frame_header *fh; int rc = 0; fh = fc_frame_header_get(fp); BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, " "r_ctl = 0x%x\n", rdata->ids.port_id, ntohs(fh->fh_ox_id), fh->fh_r_ctl); if ((fh->fh_type == FC_TYPE_ELS) && (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { switch (fc_frame_payload_op(fp)) { case ELS_ADISC: rc = bnx2fc_send_adisc(tgt, fp); break; case ELS_LOGO: rc = bnx2fc_send_logo(tgt, fp); break; case ELS_RLS: rc = bnx2fc_send_rls(tgt, fp); break; default: break; } } else if ((fh->fh_type == FC_TYPE_BLS) && (fh->fh_r_ctl == FC_RCTL_BA_ABTS)) BNX2FC_TGT_DBG(tgt, "ABTS frame\n"); else { BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x " "rctl 0x%x thru non-offload path\n", fh->fh_type, fh->fh_r_ctl); return -ENODEV; } if (rc) return -ENOMEM; else return 0; } /** * bnx2fc_xmit - bnx2fc's FCoE frame transmit function * * @lport: the associated local port * @fp: the fc_frame to be transmitted */ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) { struct ethhdr *eh; struct fcoe_crc_eof *cp; struct sk_buff *skb; struct fc_frame_header *fh; 
struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; struct bnx2fc_hba *hba; struct fcoe_port *port; struct fcoe_hdr *hp; struct bnx2fc_rport *tgt; struct fc_stats *stats; u8 sof, eof; u32 crc; unsigned int hlen, tlen, elen; int wlen, rc = 0; port = (struct fcoe_port *)lport_priv(lport); interface = port->priv; ctlr = bnx2fc_to_ctlr(interface); hba = interface->hba; fh = fc_frame_header_get(fp); skb = fp_skb(fp); if (!lport->link_up) { BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n"); kfree_skb(skb); return 0; } if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { if (!ctlr->sel_fcf) { BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); kfree_skb(skb); return -EINVAL; } if (fcoe_ctlr_els_send(ctlr, lport, skb)) return 0; } sof = fr_sof(fp); eof = fr_eof(fp); /* * Snoop the frame header to check if the frame is for * an offloaded session */ /* * tgt_ofld_list access is synchronized using * both hba mutex and hba lock. Atleast hba mutex or * hba lock needs to be held for read access. */ spin_lock_bh(&hba->hba_lock); tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id)); if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { /* This frame is for offloaded session */ BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session " "port_id = 0x%x\n", ntoh24(fh->fh_d_id)); spin_unlock_bh(&hba->hba_lock); rc = bnx2fc_xmit_l2_frame(tgt, fp); if (rc != -ENODEV) { kfree_skb(skb); return rc; } } else { spin_unlock_bh(&hba->hba_lock); } elen = sizeof(struct ethhdr); hlen = sizeof(struct fcoe_hdr); tlen = sizeof(struct fcoe_crc_eof); wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; skb->ip_summed = CHECKSUM_NONE; crc = fcoe_fc_crc(fp); /* copy port crc and eof to the skb buff */ if (skb_is_nonlinear(skb)) { skb_frag_t *frag; if (bnx2fc_get_paged_crc_eof(skb, tlen)) { kfree_skb(skb); return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; } else { cp = (struct fcoe_crc_eof 
*)skb_put(skb, tlen); } memset(cp, 0, sizeof(*cp)); cp->fcoe_eof = eof; cp->fcoe_crc32 = cpu_to_le32(~crc); if (skb_is_nonlinear(skb)) { kunmap_atomic(cp); cp = NULL; } /* adjust skb network/transport offsets to match mac/fcoe/port */ skb_push(skb, elen + hlen); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); skb->dev = interface->netdev; /* fill up mac and fcoe headers */ eh = eth_hdr(skb); eh->h_proto = htons(ETH_P_FCOE); if (ctlr->map_dest) fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); else /* insert GW address */ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN); if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN)) memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN); else memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); hp = (struct fcoe_hdr *)(eh + 1); memset(hp, 0, sizeof(*hp)); if (FC_FCOE_VER) FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); hp->fcoe_sof = sof; /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ if (lport->seq_offload && fr_max_payload(fp)) { skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; skb_shinfo(skb)->gso_size = fr_max_payload(fp); } else { skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_size = 0; } /*update tx stats */ stats = per_cpu_ptr(lport->stats, get_cpu()); stats->TxFrames++; stats->TxWords += wlen; put_cpu(); /* send down to lld */ fr_dev(fp) = lport; if (port->fcoe_pending_queue.qlen) fcoe_check_wait_queue(lport, skb); else if (fcoe_start_io(skb)) fcoe_check_wait_queue(lport, skb); return 0; } /** * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ * * @skb: the receive socket buffer * @dev: associated net device * @ptype: context * @olddev: last device * * This function receives the packet and builds FC frame and passes it up */ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *olddev) { struct fc_lport *lport; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; struct 
fc_frame_header *fh;
	struct fcoe_rcv_info *fr;
	struct fcoe_percpu_s *bg;
	struct sk_buff *tmp_skb;
	unsigned short oxid;

	interface = container_of(ptype, struct bnx2fc_interface,
				 fcoe_packet_type);
	ctlr = bnx2fc_to_ctlr(interface);
	lport = ctlr->lp;

	if (unlikely(lport == NULL)) {
		printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
		goto err;
	}

	/* Take a private copy if the skb is shared before touching it. */
	tmp_skb = skb_share_check(skb, GFP_ATOMIC);
	if (!tmp_skb)
		goto err;

	skb = tmp_skb;

	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	/* NOTE(review): oxid is computed here but never used in this
	 * function - candidate for removal. */
	oxid = ntohs(fh->fh_ox_id);

	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lport;

	/* Queue on the single global L2 receive list; wake the rx thread
	 * only on the empty -> non-empty transition. */
	bg = &bnx2fc_global;
	spin_lock(&bg->fcoe_rx_list.lock);

	__skb_queue_tail(&bg->fcoe_rx_list, skb);
	if (bg->fcoe_rx_list.qlen == 1)
		wake_up_process(bg->thread);

	spin_unlock(&bg->fcoe_rx_list.lock);
	return 0;
err:
	kfree_skb(skb);
	return -1;
}

/* Single kernel thread that drains bnx2fc_global.fcoe_rx_list and hands
 * each frame to bnx2fc_recv_frame(); sleeps until bnx2fc_rcv() wakes it. */
static int bnx2fc_l2_rcv_thread(void *arg)
{
	struct fcoe_percpu_s *bg = arg;
	struct sk_buff *skb;

	set_user_nice(current, MIN_NICE);
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		spin_lock_bh(&bg->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
			/* Drop the list lock while processing the frame. */
			spin_unlock_bh(&bg->fcoe_rx_list.lock);
			bnx2fc_recv_frame(skb);
			spin_lock_bh(&bg->fcoe_rx_list.lock);
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&bg->fcoe_rx_list.lock);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/* Validate and deliver one FCoE frame (dequeued by the L2 rx thread)
 * up into libfc via fc_exch_recv(). */
static void bnx2fc_recv_frame(struct sk_buff *skb)
{
	u32 fr_len;
	struct fc_lport *lport;
	struct fcoe_rcv_info *fr;
	struct fc_stats *stats;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof crc_eof;
	struct fc_frame *fp;
	struct fc_lport *vn_port;
	struct
fcoe_port *port; u8 *mac = NULL; u8 *dest_mac = NULL; struct fcoe_hdr *hp; fr = fcoe_dev_from_skb(skb); lport = fr->fr_dev; if (unlikely(lport == NULL)) { printk(KERN_ERR PFX "Invalid lport struct\n"); kfree_skb(skb); return; } if (skb_is_nonlinear(skb)) skb_linearize(skb); mac = eth_hdr(skb)->h_source; dest_mac = eth_hdr(skb)->h_dest; /* Pull the header */ hp = (struct fcoe_hdr *) skb_network_header(skb); fh = (struct fc_frame_header *) skb_transport_header(skb); skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { kfree_skb(skb); return; } fr_eof(fp) = crc_eof.fcoe_eof; fr_crc(fp) = crc_eof.fcoe_crc32; if (pskb_trim(skb, fr_len)) { kfree_skb(skb); return; } fh = fc_frame_header_get(fp); vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); if (vn_port) { port = lport_priv(vn_port); if (!ether_addr_equal(port->data_src_addr, dest_mac)) { BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); kfree_skb(skb); return; } } if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. 
We dont this in L2 path */ kfree_skb(skb); return; } if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) { switch (fc_frame_payload_op(fp)) { case ELS_LOGO: if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { /* drop non-FIP LOGO */ kfree_skb(skb); return; } break; } } if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { /* Drop incoming ABTS */ kfree_skb(skb); return; } stats = per_cpu_ptr(lport->stats, smp_processor_id()); stats->RxFrames++; stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; if (le32_to_cpu(fr_crc(fp)) != ~crc32(~0, skb->data, fr_len)) { if (stats->InvalidCRCCount < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); stats->InvalidCRCCount++; kfree_skb(skb); return; } fc_exch_recv(lport, fp); } /** * bnx2fc_percpu_io_thread - thread per cpu for ios * * @arg: ptr to bnx2fc_percpu_info structure */ int bnx2fc_percpu_io_thread(void *arg) { struct bnx2fc_percpu_s *p = arg; struct bnx2fc_work *work, *tmp; LIST_HEAD(work_list); set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); spin_lock_bh(&p->fp_work_lock); while (!list_empty(&p->work_list)) { list_splice_init(&p->work_list, &work_list); spin_unlock_bh(&p->fp_work_lock); list_for_each_entry_safe(work, tmp, &work_list, list) { list_del_init(&work->list); bnx2fc_process_cq_compl(work->tgt, work->wqe); kfree(work); } spin_lock_bh(&p->fp_work_lock); } __set_current_state(TASK_INTERRUPTIBLE); spin_unlock_bh(&p->fp_work_lock); } __set_current_state(TASK_RUNNING); return 0; } static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) { struct fc_host_statistics *bnx2fc_stats; struct fc_lport *lport = shost_priv(shost); struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct fcoe_statistics_params *fw_stats; int rc = 0; fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer; if (!fw_stats) return NULL; bnx2fc_stats = 
fc_get_host_stats(shost); init_completion(&hba->stat_req_done); if (bnx2fc_send_stat_req(hba)) return bnx2fc_stats; rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); if (!rc) { BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); return bnx2fc_stats; } BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt); bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt; BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt); bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4); BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt); bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt; BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt); bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4); bnx2fc_stats->dumped_frames = 0; bnx2fc_stats->lip_count = 0; bnx2fc_stats->nos_count = 0; bnx2fc_stats->loss_of_sync_count = 0; bnx2fc_stats->loss_of_signal_count = 0; bnx2fc_stats->prim_seq_protocol_err_count = 0; memcpy(&hba->prev_stats, hba->stats_buffer, sizeof(struct fcoe_statistics_params)); return bnx2fc_stats; } static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; struct bnx2fc_hba *hba = interface->hba; struct Scsi_Host *shost = lport->host; int rc = 0; shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; shost->max_lun = BNX2FC_MAX_LUN; shost->max_id = BNX2FC_MAX_FCP_TGT; shost->max_channel = 0; if (lport->vport) shost->transportt = bnx2fc_vport_xport_template; else shost->transportt = bnx2fc_transport_template; /* Add the new host to SCSI-ml */ rc = scsi_add_host(lport->host, dev); if (rc) { printk(KERN_ERR PFX "Error on scsi_add_host\n"); return rc; } if (!lport->vport) fc_host_max_npiv_vports(lport->host) = USHRT_MAX; snprintf(fc_host_symbolic_name(lport->host), 256, "%s (QLogic %s) v%s over %s", BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, 
interface->netdev->name);
	return 0;
}

/* Return 0 when the underlying physical netdev is up with carrier,
 * -1 otherwise; mirrors the result into hba->adapter_state. */
static int bnx2fc_link_ok(struct fc_lport *lport)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct net_device *dev = hba->phys_dev;
	int rc = 0;

	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
	else {
		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
		rc = -1;
	}
	return rc;
}

/**
 * bnx2fc_get_link_state - get network link state
 *
 * @hba:	adapter instance pointer
 *
 * updates adapter structure flag based on netdev state
 */
void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
{
	if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state))
		set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
	else
		clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
}

/* Per-lport network configuration: sets MFS, initializes the pending
 * tx queue/timer and derives WWNN/WWPN for the physical port (the
 * remainder of the body continues past this chunk boundary). */
static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
{
	struct bnx2fc_hba *hba;
	struct bnx2fc_interface *interface;
	struct fcoe_ctlr *ctlr;
	struct fcoe_port *port;
	u64 wwnn, wwpn;

	port = lport_priv(lport);
	interface = port->priv;
	ctlr = bnx2fc_to_ctlr(interface);
	hba = interface->hba;

	/* require support for get_pauseparam ethtool op.
*/ if (!hba->phys_dev->ethtool_ops || !hba->phys_dev->ethtool_ops->get_pauseparam) return -EOPNOTSUPP; if (fc_set_mfs(lport, BNX2FC_MFS)) return -EINVAL; skb_queue_head_init(&port->fcoe_pending_queue); port->fcoe_pending_queue_active = 0; setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport); fcoe_link_speed_update(lport); if (!lport->vport) { if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0); BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); fc_set_wwnn(lport, wwnn); if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 2, 0); BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); fc_set_wwpn(lport, wwpn); } return 0; } static void bnx2fc_destroy_timer(unsigned long data) { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - " "Destroy compl not received!!\n"); set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); wake_up_interruptible(&hba->destroy_wait); } /** * bnx2fc_indicate_netevent - Generic netdev event handler * * @context: adapter structure pointer * @event: event type * @vlan_id: vlan id - associated vlan id with this event * * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans. 
*/ static void bnx2fc_indicate_netevent(void *context, unsigned long event, u16 vlan_id) { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; struct fcoe_ctlr_device *cdev; struct fc_lport *lport; struct fc_lport *vport; struct bnx2fc_interface *interface, *tmp; struct fcoe_ctlr *ctlr; int wait_for_upload = 0; u32 link_possible = 1; if (vlan_id != 0 && event != NETDEV_UNREGISTER) return; switch (event) { case NETDEV_UP: if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) printk(KERN_ERR "indicate_netevent: "\ "hba is not UP!!\n"); break; case NETDEV_DOWN: clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); link_possible = 0; break; case NETDEV_GOING_DOWN: set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); link_possible = 0; break; case NETDEV_CHANGE: break; case NETDEV_UNREGISTER: if (!vlan_id) return; mutex_lock(&bnx2fc_dev_lock); list_for_each_entry_safe(interface, tmp, &if_list, list) { if (interface->hba == hba && interface->vlan_id == (vlan_id & VLAN_VID_MASK)) __bnx2fc_destroy(interface); } mutex_unlock(&bnx2fc_dev_lock); /* Ensure ALL destroy work has been completed before return */ flush_workqueue(bnx2fc_wq); return; default: printk(KERN_ERR PFX "Unknown netevent %ld", event); return; } mutex_lock(&bnx2fc_dev_lock); list_for_each_entry(interface, &if_list, list) { if (interface->hba != hba) continue; ctlr = bnx2fc_to_ctlr(interface); lport = ctlr->lp; BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", interface->netdev->name, event); fcoe_link_speed_update(lport); cdev = fcoe_ctlr_to_ctlr_dev(ctlr); if (link_possible && !bnx2fc_link_ok(lport)) { switch (cdev->enabled) { case FCOE_CTLR_DISABLED: pr_info("Link up while interface is disabled.\n"); break; case FCOE_CTLR_ENABLED: case FCOE_CTLR_UNUSED: /* Reset max recv frame size to default */ fc_set_mfs(lport, BNX2FC_MFS); /* * ctlr link up will only be handled during * enable to avoid sending discovery * solicitation on a stale vlan */ 
if (interface->enabled) fcoe_ctlr_link_up(ctlr); }; } else if (fcoe_ctlr_link_down(ctlr)) { switch (cdev->enabled) { case FCOE_CTLR_DISABLED: pr_info("Link down while interface is disabled.\n"); break; case FCOE_CTLR_ENABLED: case FCOE_CTLR_UNUSED: mutex_lock(&lport->lp_mutex); list_for_each_entry(vport, &lport->vports, list) fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN; mutex_unlock(&lport->lp_mutex); fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; per_cpu_ptr(lport->stats, get_cpu())->LinkFailureCount++; put_cpu(); fcoe_clean_pending_queue(lport); wait_for_upload = 1; }; } } mutex_unlock(&bnx2fc_dev_lock); if (wait_for_upload) { clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); init_waitqueue_head(&hba->shutdown_wait); BNX2FC_MISC_DBG("indicate_netevent " "num_ofld_sess = %d\n", hba->num_ofld_sess); hba->wait_for_link_down = 1; wait_event_interruptible(hba->shutdown_wait, (hba->num_ofld_sess == 0)); BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n", hba->num_ofld_sess); hba->wait_for_link_down = 0; if (signal_pending(current)) flush_signals(current); } } static int bnx2fc_libfc_config(struct fc_lport *lport) { /* Set the function pointers set by bnx2fc driver */ memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ, sizeof(struct libfc_function_template)); fc_elsct_init(lport); fc_exch_init(lport); fc_rport_init(lport); fc_disc_init(lport); fc_disc_config(lport, lport); return 0; } static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba) { int fcoe_min_xid, fcoe_max_xid; fcoe_min_xid = hba->max_xid + 1; if (nr_cpu_ids <= 2) fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET; else fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET; if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid, fcoe_max_xid, NULL)) { printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); return -ENOMEM; } return 0; } static int bnx2fc_lport_config(struct fc_lport *lport) { lport->link_up = 0; lport->qfull = 0; lport->max_retry_count = BNX2FC_MAX_RETRY_CNT; 
lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT; lport->e_d_tov = 2 * 1000; lport->r_a_tov = 10 * 1000; lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); lport->does_npiv = 1; memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA; /* alloc stats structure */ if (fc_lport_init_stats(lport)) return -ENOMEM; /* Finish fc_lport configuration */ fc_lport_config(lport); return 0; } /** * bnx2fc_fip_recv - handle a received FIP frame. * * @skb: the received skb * @dev: associated &net_device * @ptype: the &packet_type structure which was used to register this handler. * @orig_dev: original receive &net_device, in case @ dev is a bond. * * Returns: 0 for success */ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; interface = container_of(ptype, struct bnx2fc_interface, fip_packet_type); ctlr = bnx2fc_to_ctlr(interface); fcoe_ctlr_recv(ctlr, skb); return 0; } /** * bnx2fc_update_src_mac - Update Ethernet MAC filters. * * @fip: FCoE controller. * @old: Unicast MAC address to delete if the MAC is non-zero. * @new: Unicast MAC address to add. * * Remove any previously-set unicast MAC filter. * Add secondary FCoE MAC address filter for our OUI. */ static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr) { struct fcoe_port *port = lport_priv(lport); memcpy(port->data_src_addr, addr, ETH_ALEN); } /** * bnx2fc_get_src_mac - return the ethernet source address for an lport * * @lport: libfc port */ static u8 *bnx2fc_get_src_mac(struct fc_lport *lport) { struct fcoe_port *port; port = (struct fcoe_port *)lport_priv(lport); return port->data_src_addr; } /** * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame. * * @fip: FCoE controller. * @skb: FIP Packet. 
*/ static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) { skb->dev = bnx2fc_from_ctlr(fip)->netdev; dev_queue_xmit(skb); } static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fcoe_port *port = lport_priv(n_port); struct bnx2fc_interface *interface = port->priv; struct net_device *netdev = interface->netdev; struct fc_lport *vn_port; int rc; char buf[32]; rc = fcoe_validate_vport_create(vport); if (rc) { fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); printk(KERN_ERR PFX "Failed to create vport, " "WWPN (0x%s) already exists\n", buf); return rc; } if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { printk(KERN_ERR PFX "vn ports cannot be created on" "this interface\n"); return -EIO; } rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); vn_port = bnx2fc_if_create(interface, &vport->dev, 1); mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); if (!vn_port) { printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", netdev->name); return -EIO; } if (disabled) { fc_vport_set_state(vport, FC_VPORT_DISABLED); } else { vn_port->boot_time = jiffies; fc_lport_init(vn_port); fc_fabric_login(vn_port); fc_vport_setlink(vn_port); } return 0; } static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport) { struct bnx2fc_lport *blport, *tmp; spin_lock_bh(&hba->hba_lock); list_for_each_entry_safe(blport, tmp, &hba->vports, list) { if (blport->lport == lport) { list_del(&blport->list); kfree(blport); } } spin_unlock_bh(&hba->hba_lock); } static int bnx2fc_vport_destroy(struct fc_vport *vport) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port = vport->dd_data; struct fcoe_port *port = lport_priv(vn_port); struct bnx2fc_interface *interface = port->priv; struct fc_lport *v_port; bool found = false; mutex_lock(&n_port->lp_mutex); list_for_each_entry(v_port, 
&n_port->vports, list) if (v_port->vport == vport) { found = true; break; } if (!found) { mutex_unlock(&n_port->lp_mutex); return -ENOENT; } list_del(&vn_port->list); mutex_unlock(&n_port->lp_mutex); bnx2fc_free_vport(interface->hba, port->lport); bnx2fc_port_shutdown(port->lport); bnx2fc_interface_put(interface); queue_work(bnx2fc_wq, &port->destroy_work); return 0; } static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable) { struct fc_lport *lport = vport->dd_data; if (disable) { fc_vport_set_state(vport, FC_VPORT_DISABLED); fc_fabric_logoff(lport); } else { lport->boot_time = jiffies; fc_fabric_login(lport); fc_vport_setlink(lport); } return 0; } static int bnx2fc_interface_setup(struct bnx2fc_interface *interface) { struct net_device *netdev = interface->netdev; struct net_device *physdev = interface->hba->phys_dev; struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct netdev_hw_addr *ha; int sel_san_mac = 0; /* setup Source MAC Address */ rcu_read_lock(); for_each_dev_addr(physdev, ha) { BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ", ha->type); printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0], ha->addr[1], ha->addr[2], ha->addr[3], ha->addr[4], ha->addr[5]); if ((ha->type == NETDEV_HW_ADDR_T_SAN) && (is_valid_ether_addr(ha->addr))) { memcpy(ctlr->ctl_src_addr, ha->addr, ETH_ALEN); sel_san_mac = 1; BNX2FC_MISC_DBG("Found SAN MAC\n"); } } rcu_read_unlock(); if (!sel_san_mac) return -ENODEV; interface->fip_packet_type.func = bnx2fc_fip_recv; interface->fip_packet_type.type = htons(ETH_P_FIP); interface->fip_packet_type.dev = netdev; dev_add_pack(&interface->fip_packet_type); interface->fcoe_packet_type.func = bnx2fc_rcv; interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); interface->fcoe_packet_type.dev = netdev; dev_add_pack(&interface->fcoe_packet_type); return 0; } static int bnx2fc_attach_transport(void) { bnx2fc_transport_template = fc_attach_transport(&bnx2fc_transport_function); if 
(bnx2fc_transport_template == NULL) {
		printk(KERN_ERR PFX "Failed to attach FC transport\n");
		return -ENODEV;
	}

	bnx2fc_vport_xport_template =
		fc_attach_transport(&bnx2fc_vport_xport_function);
	if (bnx2fc_vport_xport_template == NULL) {
		printk(KERN_ERR PFX
		       "Failed to attach FC transport for vport\n");
		/* Roll back the physical-port template on partial failure. */
		fc_release_transport(bnx2fc_transport_template);
		bnx2fc_transport_template = NULL;
		return -ENODEV;
	}
	return 0;
}

/* Release both FC transport templates acquired in
 * bnx2fc_attach_transport(). */
static void bnx2fc_release_transport(void)
{
	fc_release_transport(bnx2fc_transport_template);
	fc_release_transport(bnx2fc_vport_xport_template);
	bnx2fc_transport_template = NULL;
	bnx2fc_vport_xport_template = NULL;
}

/* kref release callback: tears down the FIP controller, deletes the
 * fcoe_ctlr sysfs device and drops the netdev/module references taken
 * when the interface was created. */
static void bnx2fc_interface_release(struct kref *kref)
{
	struct fcoe_ctlr_device *ctlr_dev;
	struct bnx2fc_interface *interface;
	struct fcoe_ctlr *ctlr;
	struct net_device *netdev;

	interface = container_of(kref, struct bnx2fc_interface, kref);
	BNX2FC_MISC_DBG("Interface is being released\n");

	ctlr = bnx2fc_to_ctlr(interface);
	ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
	netdev = interface->netdev;

	/* tear-down FIP controller */
	if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
		fcoe_ctlr_destroy(ctlr);

	fcoe_ctlr_device_delete(ctlr_dev);

	dev_put(netdev);
	module_put(THIS_MODULE);
}

/* Take a reference on the interface. */
static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface)
{
	kref_get(&interface->kref);
}

/* Drop a reference; bnx2fc_interface_release() runs on the last put. */
static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface)
{
	kref_put(&interface->kref, bnx2fc_interface_release);
}

/* Free per-hba resources allocated in bnx2fc_hba_create(). */
static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
{
	/* Free the command manager */
	if (hba->cmd_mgr) {
		bnx2fc_cmd_mgr_free(hba->cmd_mgr);
		hba->cmd_mgr = NULL;
	}
	kfree(hba->tgt_ofld_list);
	bnx2fc_unbind_pcidev(hba);
	kfree(hba);
}

/**
 * bnx2fc_hba_create - create a new bnx2fc hba
 *
 * @cnic:	pointer to cnic device
 *
 * Creates a new FCoE hba on the given device.
* */ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) { struct bnx2fc_hba *hba; struct fcoe_capabilities *fcoe_cap; int rc; hba = kzalloc(sizeof(*hba), GFP_KERNEL); if (!hba) { printk(KERN_ERR PFX "Unable to allocate hba structure\n"); return NULL; } spin_lock_init(&hba->hba_lock); mutex_init(&hba->hba_mutex); hba->cnic = cnic; hba->max_tasks = cnic->max_fcoe_exchanges; hba->elstm_xids = (hba->max_tasks / 2); hba->max_outstanding_cmds = hba->elstm_xids; hba->max_xid = (hba->max_tasks - 1); rc = bnx2fc_bind_pcidev(hba); if (rc) { printk(KERN_ERR PFX "create_adapter: bind error\n"); goto bind_err; } hba->phys_dev = cnic->netdev; hba->next_conn_id = 0; hba->tgt_ofld_list = kzalloc(sizeof(struct bnx2fc_rport *) * BNX2FC_NUM_MAX_SESS, GFP_KERNEL); if (!hba->tgt_ofld_list) { printk(KERN_ERR PFX "Unable to allocate tgt offload list\n"); goto tgtofld_err; } hba->num_ofld_sess = 0; hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba); if (!hba->cmd_mgr) { printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); goto cmgr_err; } fcoe_cap = &hba->fcoe_cap; fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES << FCOE_IOS_PER_CONNECTION_SHIFT; fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS << FCOE_LOGINS_PER_PORT_SHIFT; fcoe_cap->capability2 = hba->max_outstanding_cmds << FCOE_NUMBER_OF_EXCHANGES_SHIFT; fcoe_cap->capability2 |= BNX2FC_MAX_NPIV << FCOE_NPIV_WWN_PER_PORT_SHIFT; fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS << FCOE_TARGETS_SUPPORTED_SHIFT; fcoe_cap->capability3 |= hba->max_outstanding_cmds << FCOE_OUTSTANDING_COMMANDS_SHIFT; fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL; init_waitqueue_head(&hba->shutdown_wait); init_waitqueue_head(&hba->destroy_wait); INIT_LIST_HEAD(&hba->vports); return hba; cmgr_err: kfree(hba->tgt_ofld_list); tgtofld_err: bnx2fc_unbind_pcidev(hba); bind_err: kfree(hba); return NULL; } struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba, struct net_device *netdev, enum fip_state fip_mode) { struct fcoe_ctlr_device 
*ctlr_dev; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; int size; int rc = 0; size = (sizeof(*interface) + sizeof(struct fcoe_ctlr)); ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ, size); if (!ctlr_dev) { printk(KERN_ERR PFX "Unable to allocate interface structure\n"); return NULL; } ctlr = fcoe_ctlr_device_priv(ctlr_dev); ctlr->cdev = ctlr_dev; interface = fcoe_ctlr_priv(ctlr); dev_hold(netdev); kref_init(&interface->kref); interface->hba = hba; interface->netdev = netdev; /* Initialize FIP */ fcoe_ctlr_init(ctlr, fip_mode); ctlr->send = bnx2fc_fip_send; ctlr->update_mac = bnx2fc_update_src_mac; ctlr->get_src_addr = bnx2fc_get_src_mac; set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); rc = bnx2fc_interface_setup(interface); if (!rc) return interface; fcoe_ctlr_destroy(ctlr); dev_put(netdev); fcoe_ctlr_device_delete(ctlr_dev); return NULL; } /** * bnx2fc_if_create - Create FCoE instance on a given interface * * @interface: FCoE interface to create a local port on * @parent: Device pointer to be the parent in sysfs for the SCSI host * @npiv: Indicates if the port is vport or not * * Creates a fc_lport instance and a Scsi_Host instance and configure them. 
* * Returns: Allocated fc_lport or an error pointer */ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport, *n_port; struct fcoe_port *port; struct Scsi_Host *shost; struct fc_vport *vport = dev_to_vport(parent); struct bnx2fc_lport *blport; struct bnx2fc_hba *hba = interface->hba; int rc = 0; blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); if (!blport) { BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n"); return NULL; } /* Allocate Scsi_Host structure */ bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds; if (!npiv) lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); else lport = libfc_vport_create(vport, sizeof(*port)); if (!lport) { printk(KERN_ERR PFX "could not allocate scsi host structure\n"); goto free_blport; } shost = lport->host; port = lport_priv(lport); port->lport = lport; port->priv = interface; port->get_netdev = bnx2fc_netdev; INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); /* Configure fcoe_port */ rc = bnx2fc_lport_config(lport); if (rc) goto lp_config_err; if (npiv) { printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", vport->node_name, vport->port_name); fc_set_wwnn(lport, vport->node_name); fc_set_wwpn(lport, vport->port_name); } /* Configure netdev and networking properties of the lport */ rc = bnx2fc_net_config(lport, interface->netdev); if (rc) { printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); goto lp_config_err; } rc = bnx2fc_shost_config(lport, parent); if (rc) { printk(KERN_ERR PFX "Couldnt configure shost for %s\n", interface->netdev->name); goto lp_config_err; } /* Initialize the libfc library */ rc = bnx2fc_libfc_config(lport); if (rc) { printk(KERN_ERR PFX "Couldnt configure libfc\n"); goto shost_err; } fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; /* Allocate exchange manager */ if (!npiv) rc = bnx2fc_em_config(lport, hba); else { shost = 
vport_to_shost(vport); n_port = shost_priv(shost); rc = fc_exch_mgr_list_clone(n_port, lport); } if (rc) { printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); goto shost_err; } bnx2fc_interface_get(interface); spin_lock_bh(&hba->hba_lock); blport->lport = lport; list_add_tail(&blport->list, &hba->vports); spin_unlock_bh(&hba->hba_lock); return lport; shost_err: scsi_remove_host(shost); lp_config_err: scsi_host_put(lport->host); free_blport: kfree(blport); return NULL; } static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface) { /* Dont listen for Ethernet packets anymore */ __dev_remove_pack(&interface->fcoe_packet_type); __dev_remove_pack(&interface->fip_packet_type); synchronize_net(); } static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport = ctlr->lp; struct fcoe_port *port = lport_priv(lport); struct bnx2fc_hba *hba = interface->hba; /* Stop the transmit retry timer */ del_timer_sync(&port->timer); /* Free existing transmit skbs */ fcoe_clean_pending_queue(lport); bnx2fc_net_cleanup(interface); bnx2fc_free_vport(hba, lport); } static void bnx2fc_if_destroy(struct fc_lport *lport) { /* Free queued packets for the receive thread */ bnx2fc_clean_rx_queue(lport); /* Detach from scsi-ml */ fc_remove_host(lport->host); scsi_remove_host(lport->host); /* * Note that only the physical lport will have the exchange manager. 
* for vports, this function is NOP */ fc_exch_mgr_free(lport); /* Free memory used by statistical counters */ fc_lport_free_stats(lport); /* Release Scsi_Host */ scsi_host_put(lport->host); } static void __bnx2fc_destroy(struct bnx2fc_interface *interface) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport = ctlr->lp; struct fcoe_port *port = lport_priv(lport); bnx2fc_interface_cleanup(interface); bnx2fc_stop(interface); list_del(&interface->list); bnx2fc_interface_put(interface); queue_work(bnx2fc_wq, &port->destroy_work); } /** * bnx2fc_destroy - Destroy a bnx2fc FCoE interface * * @buffer: The name of the Ethernet interface to be destroyed * @kp: The associated kernel parameter * * Called from sysfs. * * Returns: 0 for success */ static int bnx2fc_destroy(struct net_device *netdev) { struct bnx2fc_interface *interface = NULL; struct workqueue_struct *timer_work_queue; struct fcoe_ctlr *ctlr; int rc = 0; rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); if (!interface || !ctlr->lp) { rc = -ENODEV; printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n"); goto netdev_err; } timer_work_queue = interface->timer_work_queue; __bnx2fc_destroy(interface); destroy_workqueue(timer_work_queue); netdev_err: mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } static void bnx2fc_destroy_work(struct work_struct *work) { struct fcoe_port *port; struct fc_lport *lport; port = container_of(work, struct fcoe_port, destroy_work); lport = port->lport; BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); bnx2fc_if_destroy(lport); } static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) { bnx2fc_free_fw_resc(hba); bnx2fc_free_task_ctx(hba); } /** * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated * pci structure * * @hba: Adapter instance */ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba) { if 
(bnx2fc_setup_task_ctx(hba)) goto mem_err; if (bnx2fc_setup_fw_resc(hba)) goto mem_err; return 0; mem_err: bnx2fc_unbind_adapter_devices(hba); return -ENOMEM; } static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) { struct cnic_dev *cnic; struct pci_dev *pdev; if (!hba->cnic) { printk(KERN_ERR PFX "cnic is NULL\n"); return -ENODEV; } cnic = hba->cnic; pdev = hba->pcidev = cnic->pcidev; if (!hba->pcidev) return -ENODEV; switch (pdev->device) { case PCI_DEVICE_ID_NX2_57710: strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57711: strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57712: case PCI_DEVICE_ID_NX2_57712_MF: case PCI_DEVICE_ID_NX2_57712_VF: strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57800: case PCI_DEVICE_ID_NX2_57800_MF: case PCI_DEVICE_ID_NX2_57800_VF: strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57810: case PCI_DEVICE_ID_NX2_57810_MF: case PCI_DEVICE_ID_NX2_57810_VF: strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); break; case PCI_DEVICE_ID_NX2_57840: case PCI_DEVICE_ID_NX2_57840_MF: case PCI_DEVICE_ID_NX2_57840_VF: case PCI_DEVICE_ID_NX2_57840_2_20: case PCI_DEVICE_ID_NX2_57840_4_10: strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); break; default: pr_err(PFX "Unknown device id 0x%x\n", pdev->device); break; } pci_dev_get(hba->pcidev); return 0; } static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) { if (hba->pcidev) { hba->chip_num[0] = '\0'; pci_dev_put(hba->pcidev); } hba->pcidev = NULL; } /** * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats * * @handle: transport handle pointing to adapter struture */ static int bnx2fc_ulp_get_stats(void *handle) { struct bnx2fc_hba *hba = handle; struct cnic_dev *cnic; struct fcoe_stats_info *stats_addr; if (!hba) return -EINVAL; cnic = hba->cnic; stats_addr = &cnic->stats_addr->fcoe_stat; if (!stats_addr) return -EINVAL; strncpy(stats_addr->version, BNX2FC_VERSION, 
sizeof(stats_addr->version)); stats_addr->txq_size = BNX2FC_SQ_WQES_MAX; stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX; return 0; } /** * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance * * @handle: transport handle pointing to adapter structure * * This function maps adapter structure to pcidev structure and initiates * firmware handshake to enable/initialize on-chip FCoE components. * This bnx2fc - cnic interface api callback is used after following * conditions are met - * a) underlying network interface is up (marked by event NETDEV_UP * from netdev * b) bnx2fc adatper structure is registered. */ static void bnx2fc_ulp_start(void *handle) { struct bnx2fc_hba *hba = handle; struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; struct fc_lport *lport; mutex_lock(&bnx2fc_dev_lock); if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) bnx2fc_fw_init(hba); BNX2FC_MISC_DBG("bnx2fc started.\n"); list_for_each_entry(interface, &if_list, list) { if (interface->hba == hba) { ctlr = bnx2fc_to_ctlr(interface); lport = ctlr->lp; /* Kick off Fabric discovery*/ printk(KERN_ERR PFX "ulp_init: start discovery\n"); lport->tt.frame_send = bnx2fc_xmit; bnx2fc_start_disc(interface); } } mutex_unlock(&bnx2fc_dev_lock); } static void bnx2fc_port_shutdown(struct fc_lport *lport) { BNX2FC_MISC_DBG("Entered %s\n", __func__); fc_fabric_logoff(lport); fc_lport_destroy(lport); } static void bnx2fc_stop(struct bnx2fc_interface *interface) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport; struct fc_lport *vport; if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) return; lport = ctlr->lp; bnx2fc_port_shutdown(lport); mutex_lock(&lport->lp_mutex); list_for_each_entry(vport, &lport->vports, list) fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN; mutex_unlock(&lport->lp_mutex); fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; fcoe_ctlr_link_down(ctlr); fcoe_clean_pending_queue(lport); } static int bnx2fc_fw_init(struct 
bnx2fc_hba *hba) { #define BNX2FC_INIT_POLL_TIME (1000 / HZ) int rc = -1; int i = HZ; rc = bnx2fc_bind_adapter_devices(hba); if (rc) { printk(KERN_ALERT PFX "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc); goto err_out; } rc = bnx2fc_send_fw_fcoe_init_msg(hba); if (rc) { printk(KERN_ALERT PFX "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc); goto err_unbind; } /* * Wait until the adapter init message is complete, and adapter * state is UP. */ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) msleep(BNX2FC_INIT_POLL_TIME); if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) { printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. " "Ignoring...\n", hba->cnic->netdev->name); rc = -1; goto err_unbind; } set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags); return 0; err_unbind: bnx2fc_unbind_adapter_devices(hba); err_out: return rc; } static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) { if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) { if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { init_timer(&hba->destroy_timer); hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + jiffies; hba->destroy_timer.function = bnx2fc_destroy_timer; hba->destroy_timer.data = (unsigned long)hba; add_timer(&hba->destroy_timer); wait_event_interruptible(hba->destroy_wait, test_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags)); clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); /* This should never happen */ if (signal_pending(current)) flush_signals(current); del_timer_sync(&hba->destroy_timer); } bnx2fc_unbind_adapter_devices(hba); } } /** * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance * * @handle: transport handle pointing to adapter structure * * Driver checks if adapter is already in shutdown mode, if not start * the shutdown process. 
*/ static void bnx2fc_ulp_stop(void *handle) { struct bnx2fc_hba *hba = handle; struct bnx2fc_interface *interface; printk(KERN_ERR "ULP_STOP\n"); mutex_lock(&bnx2fc_dev_lock); if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) goto exit; list_for_each_entry(interface, &if_list, list) { if (interface->hba == hba) bnx2fc_stop(interface); } BUG_ON(hba->num_ofld_sess != 0); mutex_lock(&hba->hba_mutex); clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); mutex_unlock(&hba->hba_mutex); bnx2fc_fw_destroy(hba); exit: mutex_unlock(&bnx2fc_dev_lock); } static void bnx2fc_start_disc(struct bnx2fc_interface *interface) { struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); struct fc_lport *lport; int wait_cnt = 0; BNX2FC_MISC_DBG("Entered %s\n", __func__); /* Kick off FIP/FLOGI */ if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { printk(KERN_ERR PFX "Init not done yet\n"); return; } lport = ctlr->lp; BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); if (!bnx2fc_link_ok(lport) && interface->enabled) { BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); fcoe_ctlr_link_up(ctlr); fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); } /* wait for the FCF to be selected before issuing FLOGI */ while (!ctlr->sel_fcf) { msleep(250); /* give up after 3 secs */ if (++wait_cnt > 12) break; } /* Reset max receive frame size to default */ if (fc_set_mfs(lport, BNX2FC_MFS)) return; fc_lport_init(lport); fc_fabric_login(lport); } /** * bnx2fc_ulp_init - Initialize an adapter instance * * @dev : cnic device handle * Called from cnic_register_driver() context to initialize all * enumerated cnic devices. This routine allocates adapter structure * and other device specific resources. 
*/ static void bnx2fc_ulp_init(struct cnic_dev *dev) { struct bnx2fc_hba *hba; int rc = 0; BNX2FC_MISC_DBG("Entered %s\n", __func__); /* bnx2fc works only when bnx2x is loaded */ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) || (dev->max_fcoe_conn == 0)) { printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," " flags: %lx fcoe_conn: %d\n", dev->netdev->name, dev->flags, dev->max_fcoe_conn); return; } hba = bnx2fc_hba_create(dev); if (!hba) { printk(KERN_ERR PFX "hba initialization failed\n"); return; } /* Add HBA to the adapter list */ mutex_lock(&bnx2fc_dev_lock); list_add_tail(&hba->list, &adapter_list); adapter_count++; mutex_unlock(&bnx2fc_dev_lock); dev->fcoe_cap = &hba->fcoe_cap; clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); rc = dev->register_device(dev, CNIC_ULP_FCOE, (void *) hba); if (rc) printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc); else set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); } /* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */ static int __bnx2fc_disable(struct fcoe_ctlr *ctlr) { struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); if (interface->enabled == true) { if (!ctlr->lp) { pr_err(PFX "__bnx2fc_disable: lport not found\n"); return -ENODEV; } else { interface->enabled = false; fcoe_ctlr_link_down(ctlr); fcoe_clean_pending_queue(ctlr->lp); } } return 0; } /** * Deperecated: Use bnx2fc_enabled() */ static int bnx2fc_disable(struct net_device *netdev) { struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; int rc = 0; rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); if (!interface) { rc = -ENODEV; pr_err(PFX "bnx2fc_disable: interface not found\n"); } else { rc = __bnx2fc_disable(ctlr); } mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } static int __bnx2fc_enable(struct fcoe_ctlr *ctlr) { struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); if (interface->enabled == false) { if (!ctlr->lp) { 
pr_err(PFX "__bnx2fc_enable: lport not found\n"); return -ENODEV; } else if (!bnx2fc_link_ok(ctlr->lp)) { fcoe_ctlr_link_up(ctlr); interface->enabled = true; } } return 0; } /** * Deprecated: Use bnx2fc_enabled() */ static int bnx2fc_enable(struct net_device *netdev) { struct bnx2fc_interface *interface; struct fcoe_ctlr *ctlr; int rc = 0; rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); interface = bnx2fc_interface_lookup(netdev); ctlr = bnx2fc_to_ctlr(interface); if (!interface) { rc = -ENODEV; pr_err(PFX "bnx2fc_enable: interface not found\n"); } else { rc = __bnx2fc_enable(ctlr); } mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } /** * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller * @cdev: The FCoE Controller that is being enabled or disabled * * fcoe_sysfs will ensure that the state of 'enabled' has * changed, so no checking is necessary here. This routine simply * calls fcoe_enable or fcoe_disable, both of which are deprecated. * When those routines are removed the functionality can be merged * here. */ static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) { struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); switch (cdev->enabled) { case FCOE_CTLR_ENABLED: return __bnx2fc_enable(ctlr); case FCOE_CTLR_DISABLED: return __bnx2fc_disable(ctlr); case FCOE_CTLR_UNUSED: default: return -ENOTSUPP; }; } enum bnx2fc_create_link_state { BNX2FC_CREATE_LINK_DOWN, BNX2FC_CREATE_LINK_UP, }; /** * _bnx2fc_create() - Create bnx2fc FCoE interface * @netdev : The net_device object the Ethernet interface to create on * @fip_mode: The FIP mode for this creation * @link_state: The ctlr link state on creation * * Called from either the libfcoe 'create' module parameter * via fcoe_create or from fcoe_syfs's ctlr_create file. * * libfcoe's 'create' module parameter is deprecated so some * consolidation of code can be done when that interface is * removed. 
* * Returns: 0 for success */ static int _bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode, enum bnx2fc_create_link_state link_state) { struct fcoe_ctlr_device *cdev; struct fcoe_ctlr *ctlr; struct bnx2fc_interface *interface; struct bnx2fc_hba *hba; struct net_device *phys_dev = netdev; struct fc_lport *lport; struct ethtool_drvinfo drvinfo; int rc = 0; int vlan_id = 0; BNX2FC_MISC_DBG("Entered bnx2fc_create\n"); if (fip_mode != FIP_MODE_FABRIC) { printk(KERN_ERR "fip mode not FABRIC\n"); return -EIO; } rtnl_lock(); mutex_lock(&bnx2fc_dev_lock); if (!try_module_get(THIS_MODULE)) { rc = -EINVAL; goto mod_err; } /* obtain physical netdev */ if (netdev->priv_flags & IFF_802_1Q_VLAN) phys_dev = vlan_dev_real_dev(netdev); /* verify if the physical device is a netxtreme2 device */ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { memset(&drvinfo, 0, sizeof(drvinfo)); phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) { printk(KERN_ERR PFX "Not a netxtreme2 device\n"); rc = -EINVAL; goto netdev_err; } } else { printk(KERN_ERR PFX "unable to obtain drv_info\n"); rc = -EINVAL; goto netdev_err; } /* obtain interface and initialize rest of the structure */ hba = bnx2fc_hba_lookup(phys_dev); if (!hba) { rc = -ENODEV; printk(KERN_ERR PFX "bnx2fc_create: hba not found\n"); goto netdev_err; } if (bnx2fc_interface_lookup(netdev)) { rc = -EEXIST; goto netdev_err; } interface = bnx2fc_interface_create(hba, netdev, fip_mode); if (!interface) { printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); rc = -ENOMEM; goto ifput_err; } if (netdev->priv_flags & IFF_802_1Q_VLAN) { vlan_id = vlan_dev_vlan_id(netdev); interface->vlan_enabled = 1; } ctlr = bnx2fc_to_ctlr(interface); cdev = fcoe_ctlr_to_ctlr_dev(ctlr); interface->vlan_id = vlan_id; interface->timer_work_queue = create_singlethread_workqueue("bnx2fc_timer_wq"); if (!interface->timer_work_queue) { printk(KERN_ERR PFX "ulp_init could not 
create timer_wq\n"); rc = -EINVAL; goto ifput_err; } lport = bnx2fc_if_create(interface, &cdev->dev, 0); if (!lport) { printk(KERN_ERR PFX "Failed to create interface (%s)\n", netdev->name); rc = -EINVAL; goto if_create_err; } /* Add interface to if_list */ list_add_tail(&interface->list, &if_list); lport->boot_time = jiffies; /* Make this master N_port */ ctlr->lp = lport; if (link_state == BNX2FC_CREATE_LINK_UP) cdev->enabled = FCOE_CTLR_ENABLED; else cdev->enabled = FCOE_CTLR_DISABLED; if (link_state == BNX2FC_CREATE_LINK_UP && !bnx2fc_link_ok(lport)) { fcoe_ctlr_link_up(ctlr); fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); } BNX2FC_HBA_DBG(lport, "create: START DISC\n"); bnx2fc_start_disc(interface); if (link_state == BNX2FC_CREATE_LINK_UP) interface->enabled = true; /* * Release from kref_init in bnx2fc_interface_setup, on success * lport should be holding a reference taken in bnx2fc_if_create */ bnx2fc_interface_put(interface); /* put netdev that was held while calling dev_get_by_name */ mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return 0; if_create_err: destroy_workqueue(interface->timer_work_queue); ifput_err: bnx2fc_net_cleanup(interface); bnx2fc_interface_put(interface); goto mod_err; netdev_err: module_put(THIS_MODULE); mod_err: mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); return rc; } /** * bnx2fc_create() - Create a bnx2fc interface * @netdev : The net_device object the Ethernet interface to create on * @fip_mode: The FIP mode for this creation * * Called from fcoe transport * * Returns: 0 for success */ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) { return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP); } /** * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs * @netdev: The net_device to be used by the allocated FCoE Controller * * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr * in a link_down state. 
The allows the user an opportunity to configure * the FCoE Controller from sysfs before enabling the FCoE Controller. * * Creating in with this routine starts the FCoE Controller in Fabric * mode. The user can change to VN2VN or another mode before enabling. */ static int bnx2fc_ctlr_alloc(struct net_device *netdev) { return _bnx2fc_create(netdev, FIP_MODE_FABRIC, BNX2FC_CREATE_LINK_DOWN); } /** * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance * * @cnic: Pointer to cnic device instance * **/ static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic) { struct bnx2fc_hba *hba; /* Called with bnx2fc_dev_lock held */ list_for_each_entry(hba, &adapter_list, list) { if (hba->cnic == cnic) return hba; } return NULL; } static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device *netdev) { struct bnx2fc_interface *interface; /* Called with bnx2fc_dev_lock held */ list_for_each_entry(interface, &if_list, list) { if (interface->netdev == netdev) return interface; } return NULL; } static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) { struct bnx2fc_hba *hba; /* Called with bnx2fc_dev_lock held */ list_for_each_entry(hba, &adapter_list, list) { if (hba->phys_dev == phys_dev) return hba; } printk(KERN_ERR PFX "adapter_lookup: hba NULL\n"); return NULL; } /** * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources * * @dev cnic device handle */ static void bnx2fc_ulp_exit(struct cnic_dev *dev) { struct bnx2fc_hba *hba; struct bnx2fc_interface *interface, *tmp; BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n", dev->netdev->name, dev->flags); return; } mutex_lock(&bnx2fc_dev_lock); hba = bnx2fc_find_hba_for_cnic(dev); if (!hba) { printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n", dev); mutex_unlock(&bnx2fc_dev_lock); return; } list_del_init(&hba->list); adapter_count--; 
list_for_each_entry_safe(interface, tmp, &if_list, list) /* destroy not called yet, move to quiesced list */ if (interface->hba == hba) __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock); /* Ensure ALL destroy work has been completed before return */ flush_workqueue(bnx2fc_wq); bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); bnx2fc_hba_destroy(hba); } /** * bnx2fc_fcoe_reset - Resets the fcoe * * @shost: shost the reset is from * * Returns: always 0 */ static int bnx2fc_fcoe_reset(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); fc_lport_reset(lport); return 0; } static bool bnx2fc_match(struct net_device *netdev) { struct net_device *phys_dev = netdev; mutex_lock(&bnx2fc_dev_lock); if (netdev->priv_flags & IFF_802_1Q_VLAN) phys_dev = vlan_dev_real_dev(netdev); if (bnx2fc_hba_lookup(phys_dev)) { mutex_unlock(&bnx2fc_dev_lock); return true; } mutex_unlock(&bnx2fc_dev_lock); return false; } static struct fcoe_transport bnx2fc_transport = { .name = {"bnx2fc"}, .attached = false, .list = LIST_HEAD_INIT(bnx2fc_transport.list), .alloc = bnx2fc_ctlr_alloc, .match = bnx2fc_match, .create = bnx2fc_create, .destroy = bnx2fc_destroy, .enable = bnx2fc_enable, .disable = bnx2fc_disable, }; /** * bnx2fc_percpu_thread_create - Create a receive thread for an * online CPU * * @cpu: cpu index for the online cpu */ static void bnx2fc_percpu_thread_create(unsigned int cpu) { struct bnx2fc_percpu_s *p; struct task_struct *thread; p = &per_cpu(bnx2fc_percpu, cpu); thread = kthread_create_on_node(bnx2fc_percpu_io_thread, (void *)p, cpu_to_node(cpu), "bnx2fc_thread/%d", cpu); /* bind thread to the cpu */ if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); } } static void bnx2fc_percpu_thread_destroy(unsigned int cpu) { struct bnx2fc_percpu_s *p; struct task_struct *thread; struct 
bnx2fc_work *work, *tmp; BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu); /* Prevent any new work from being queued for this CPU */ p = &per_cpu(bnx2fc_percpu, cpu); spin_lock_bh(&p->fp_work_lock); thread = p->iothread; p->iothread = NULL; /* Free all work in the list */ list_for_each_entry_safe(work, tmp, &p->work_list, list) { list_del_init(&work->list); bnx2fc_process_cq_compl(work->tgt, work->wqe); kfree(work); } spin_unlock_bh(&p->fp_work_lock); if (thread) kthread_stop(thread); } /** * bnx2fc_cpu_callback - Handler for CPU hotplug events * * @nfb: The callback data block * @action: The event triggering the callback * @hcpu: The index of the CPU that the event is for * * This creates or destroys per-CPU data for fcoe * * Returns NOTIFY_OK always. */ static int bnx2fc_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned cpu = (unsigned long)hcpu; switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: printk(PFX "CPU %x online: Create Rx thread\n", cpu); bnx2fc_percpu_thread_create(cpu); break; case CPU_DEAD: case CPU_DEAD_FROZEN: printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); bnx2fc_percpu_thread_destroy(cpu); break; default: break; } return NOTIFY_OK; } /** * bnx2fc_mod_init - module init entry point * * Initialize driver wide global data structures, and register * with cnic module **/ static int __init bnx2fc_mod_init(void) { struct fcoe_percpu_s *bg; struct task_struct *l2_thread; int rc = 0; unsigned int cpu = 0; struct bnx2fc_percpu_s *p; printk(KERN_INFO PFX "%s", version); /* register as a fcoe transport */ rc = fcoe_transport_attach(&bnx2fc_transport); if (rc) { printk(KERN_ERR "failed to register an fcoe transport, check " "if libfcoe is loaded\n"); goto out; } INIT_LIST_HEAD(&adapter_list); INIT_LIST_HEAD(&if_list); mutex_init(&bnx2fc_dev_lock); adapter_count = 0; /* Attach FC transport template */ rc = bnx2fc_attach_transport(); if (rc) goto detach_ft; bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0); 
if (!bnx2fc_wq) { rc = -ENOMEM; goto release_bt; } bg = &bnx2fc_global; skb_queue_head_init(&bg->fcoe_rx_list); l2_thread = kthread_create(bnx2fc_l2_rcv_thread, (void *)bg, "bnx2fc_l2_thread"); if (IS_ERR(l2_thread)) { rc = PTR_ERR(l2_thread); goto free_wq; } wake_up_process(l2_thread); spin_lock_bh(&bg->fcoe_rx_list.lock); bg->thread = l2_thread; spin_unlock_bh(&bg->fcoe_rx_list.lock); for_each_possible_cpu(cpu) { p = &per_cpu(bnx2fc_percpu, cpu); INIT_LIST_HEAD(&p->work_list); spin_lock_init(&p->fp_work_lock); } cpu_notifier_register_begin(); for_each_online_cpu(cpu) { bnx2fc_percpu_thread_create(cpu); } /* Initialize per CPU interrupt thread */ __register_hotcpu_notifier(&bnx2fc_cpu_notifier); cpu_notifier_register_done(); cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); return 0; free_wq: destroy_workqueue(bnx2fc_wq); release_bt: bnx2fc_release_transport(); detach_ft: fcoe_transport_detach(&bnx2fc_transport); out: return rc; } static void __exit bnx2fc_mod_exit(void) { LIST_HEAD(to_be_deleted); struct bnx2fc_hba *hba, *next; struct fcoe_percpu_s *bg; struct task_struct *l2_thread; struct sk_buff *skb; unsigned int cpu = 0; /* * NOTE: Since cnic calls register_driver routine rtnl_lock, * it will have higher precedence than bnx2fc_dev_lock. * unregister_device() cannot be called with bnx2fc_dev_lock * held. 
*/ mutex_lock(&bnx2fc_dev_lock); list_splice(&adapter_list, &to_be_deleted); INIT_LIST_HEAD(&adapter_list); adapter_count = 0; mutex_unlock(&bnx2fc_dev_lock); /* Unregister with cnic */ list_for_each_entry_safe(hba, next, &to_be_deleted, list) { list_del_init(&hba->list); printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n", hba); bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); bnx2fc_hba_destroy(hba); } cnic_unregister_driver(CNIC_ULP_FCOE); /* Destroy global thread */ bg = &bnx2fc_global; spin_lock_bh(&bg->fcoe_rx_list.lock); l2_thread = bg->thread; bg->thread = NULL; while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) kfree_skb(skb); spin_unlock_bh(&bg->fcoe_rx_list.lock); if (l2_thread) kthread_stop(l2_thread); cpu_notifier_register_begin(); /* Destroy per cpu threads */ for_each_online_cpu(cpu) { bnx2fc_percpu_thread_destroy(cpu); } __unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); cpu_notifier_register_done(); destroy_workqueue(bnx2fc_wq); /* * detach from scsi transport * must happen after all destroys are done */ bnx2fc_release_transport(); /* detach from fcoe transport */ fcoe_transport_detach(&bnx2fc_transport); } module_init(bnx2fc_mod_init); module_exit(bnx2fc_mod_exit); static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = { .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled, .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb, .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb, .get_fcoe_fcf_selected = fcoe_fcf_get_selected, .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id, }; static struct fc_function_template bnx2fc_transport_function = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, 
.show_host_supported_fc4s = 1, .show_host_active_fc4s = 1, .show_host_maxframe_size = 1, .show_host_port_id = 1, .show_host_supported_speeds = 1, .get_host_speed = fc_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, .get_host_port_state = fc_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + sizeof(struct bnx2fc_rport)), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_host_fabric_name = 1, .show_starget_node_name = 1, .show_starget_port_name = 1, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_fc_host_stats = bnx2fc_get_host_stats, .issue_fc_host_lip = bnx2fc_fcoe_reset, .terminate_rport_io = fc_rport_terminate_io, .vport_create = bnx2fc_vport_create, .vport_delete = bnx2fc_vport_destroy, .vport_disable = bnx2fc_vport_disable, .bsg_request = fc_lport_bsg_request, }; static struct fc_function_template bnx2fc_vport_xport_function = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, .show_host_supported_fc4s = 1, .show_host_active_fc4s = 1, .show_host_maxframe_size = 1, .show_host_port_id = 1, .show_host_supported_speeds = 1, .get_host_speed = fc_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, .get_host_port_state = fc_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + sizeof(struct bnx2fc_rport)), .show_rport_maxframe_size = 1, .show_rport_supported_classes = 1, .show_host_fabric_name = 1, .show_starget_node_name = 1, .show_starget_port_name = 1, .show_starget_port_id = 1, .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, .show_rport_dev_loss_tmo = 1, .get_fc_host_stats = fc_get_host_stats, .issue_fc_host_lip = bnx2fc_fcoe_reset, .terminate_rport_io = fc_rport_terminate_io, .bsg_request = fc_lport_bsg_request, }; /** * scsi_host_template structure 
used while registering with SCSI-ml */ static struct scsi_host_template bnx2fc_shost_template = { .module = THIS_MODULE, .name = "QLogic Offload FCoE Initiator", .queuecommand = bnx2fc_queuecommand, .eh_abort_handler = bnx2fc_eh_abort, /* abts */ .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ .eh_host_reset_handler = fc_eh_host_reset, .slave_alloc = fc_slave_alloc, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, .max_sectors = 1024, .use_blk_tags = 1, .track_queue_depth = 1, }; static struct libfc_function_template bnx2fc_libfc_fcn_templ = { .frame_send = bnx2fc_xmit, .elsct_send = bnx2fc_elsct_send, .fcp_abort_io = bnx2fc_abort_io, .fcp_cleanup = bnx2fc_cleanup, .get_lesb = fcoe_get_lesb, .rport_event_callback = bnx2fc_rport_event_handler, }; /** * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface * structure carrying callback function pointers */ static struct cnic_ulp_ops bnx2fc_cnic_cb = { .owner = THIS_MODULE, .cnic_init = bnx2fc_ulp_init, .cnic_exit = bnx2fc_ulp_exit, .cnic_start = bnx2fc_ulp_start, .cnic_stop = bnx2fc_ulp_stop, .indicate_kcqes = bnx2fc_indicate_kcqe, .indicate_netevent = bnx2fc_indicate_netevent, .cnic_get_stats = bnx2fc_ulp_get_stats, };
gpl-2.0
xcaliburinhand/I9000-Reoriented-for-I897-Froyo
drivers/usb/storage/karma.c
612
6134
/* Driver for Rio Karma * * (c) 2006 Bob Copeland <me@bobcopeland.com> * (c) 2006 Keith Bennett <keith@mcs.st-and.ac.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "debug.h" MODULE_DESCRIPTION("Driver for Rio Karma"); MODULE_AUTHOR("Bob Copeland <me@bobcopeland.com>, Keith Bennett <keith@mcs.st-and.ac.uk>"); MODULE_LICENSE("GPL"); #define RIO_PREFIX "RIOP\x00" #define RIO_PREFIX_LEN 5 #define RIO_SEND_LEN 40 #define RIO_RECV_LEN 0x200 #define RIO_ENTER_STORAGE 0x1 #define RIO_LEAVE_STORAGE 0x2 #define RIO_RESET 0xC struct karma_data { int in_storage; char *recv; }; static int rio_karma_init(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags)|(USB_US_TYPE_STOR<<24) } struct usb_device_id karma_usb_ids[] = { # include "unusual_karma.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, karma_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, 
use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev karma_unusual_dev_list[] = { # include "unusual_karma.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* * Send commands to Rio Karma. * * For each command we send 40 bytes starting 'RIOP\0' followed by * the command number and a sequence number, which the device will ack * with a 512-byte packet with the high four bits set and everything * else null. Then we send 'RIOP\x80' followed by a zero and the * sequence number, until byte 5 in the response repeats the sequence * number. */ static int rio_karma_send_command(char cmd, struct us_data *us) { int result, partial; unsigned long timeout; static unsigned char seq = 1; struct karma_data *data = (struct karma_data *) us->extra; US_DEBUGP("karma: sending command %04x\n", cmd); memset(us->iobuf, 0, RIO_SEND_LEN); memcpy(us->iobuf, RIO_PREFIX, RIO_PREFIX_LEN); us->iobuf[5] = cmd; us->iobuf[6] = seq; timeout = jiffies + msecs_to_jiffies(6000); for (;;) { result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, us->iobuf, RIO_SEND_LEN, &partial); if (result != USB_STOR_XFER_GOOD) goto err; result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data->recv, RIO_RECV_LEN, &partial); if (result != USB_STOR_XFER_GOOD) goto err; if (data->recv[5] == seq) break; if (time_after(jiffies, timeout)) goto err; us->iobuf[4] = 0x80; us->iobuf[5] = 0; msleep(50); } seq++; if (seq == 0) seq = 1; US_DEBUGP("karma: sent command %04x\n", cmd); return 0; err: US_DEBUGP("karma: command %04x failed\n", cmd); return USB_STOR_TRANSPORT_FAILED; } /* * Trap START_STOP and READ_10 to leave/re-enter storage mode. * Everything else is propagated to the normal bulk layer. 
*/ static int rio_karma_transport(struct scsi_cmnd *srb, struct us_data *us) { int ret; struct karma_data *data = (struct karma_data *) us->extra; if (srb->cmnd[0] == READ_10 && !data->in_storage) { ret = rio_karma_send_command(RIO_ENTER_STORAGE, us); if (ret) return ret; data->in_storage = 1; return usb_stor_Bulk_transport(srb, us); } else if (srb->cmnd[0] == START_STOP) { ret = rio_karma_send_command(RIO_LEAVE_STORAGE, us); if (ret) return ret; data->in_storage = 0; return rio_karma_send_command(RIO_RESET, us); } return usb_stor_Bulk_transport(srb, us); } static void rio_karma_destructor(void *extra) { struct karma_data *data = (struct karma_data *) extra; kfree(data->recv); } static int rio_karma_init(struct us_data *us) { int ret = 0; struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO); if (!data) goto out; data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO); if (!data->recv) { kfree(data); goto out; } us->extra = data; us->extra_destructor = rio_karma_destructor; ret = rio_karma_send_command(RIO_ENTER_STORAGE, us); data->in_storage = (ret == 0); out: return ret; } static int karma_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - karma_usb_ids) + karma_unusual_dev_list); if (result) return result; us->transport_name = "Rio Karma/Bulk"; us->transport = rio_karma_transport; us->transport_reset = usb_stor_Bulk_reset; result = usb_stor_probe2(us); return result; } static struct usb_driver karma_driver = { .name = "ums-karma", .probe = karma_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = karma_usb_ids, .soft_unbind = 1, }; static int __init karma_init(void) { return usb_register(&karma_driver); } static void __exit karma_exit(void) { usb_deregister(&karma_driver); } module_init(karma_init); 
module_exit(karma_exit);
gpl-2.0
andr7e/rk3188_tablet_jb
kernel/drivers/input/mouse/bcm5974.c
868
27181
/* * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver * * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) * * The USB initialization and package decoding was made by * Scott Shawcroft as part of the touchd user-space driver project: * Copyright (C) 2008 Scott Shawcroft (scott.shawcroft@gmail.com) * * The BCM5974 driver is based on the appletouch driver: * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2005 Johannes Berg (johannes@sipsolutions.net) * Copyright (C) 2005 Stelian Pop (stelian@popies.net) * Copyright (C) 2005 Frank Arnold (frank@scirocco-5v-turbo.de) * Copyright (C) 2005 Peter Osterlund (petero2@telia.com) * Copyright (C) 2005 Michael Hanselmann (linux-kernel@hansmi.ch) * Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #include <linux/hid.h> #include <linux/mutex.h> #define USB_VENDOR_ID_APPLE 0x05ac /* MacbookAir, aka wellspring */ #define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223 #define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224 #define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225 /* MacbookProPenryn, aka wellspring2 */ #define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230 #define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231 #define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232 /* Macbook5,1 (unibody), aka wellspring3 */ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 /* MacbookAir3,2 (unibody), aka wellspring5 */ #define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f #define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240 #define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241 /* MacbookAir3,1 (unibody), aka wellspring4 */ #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 #define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 /* Macbook8 (unibody, March 2011) */ #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_PROTOCOL), \ .idVendor = USB_VENDOR_ID_APPLE, \ .idProduct = (prod), \ .bInterfaceClass = USB_INTERFACE_CLASS_HID, \ .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE \ } /* table of devices that work with this driver */ static const struct usb_device_id bcm5974_table[] = { /* MacbookAir1.1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO), 
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_JIS), /* MacbookProPenryn */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS), /* Macbook5,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), /* MacbookAir3,2 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS), /* MacbookAir3,1 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), /* MacbookPro8 */ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), /* Terminating entry */ {} }; MODULE_DEVICE_TABLE(usb, bcm5974_table); MODULE_AUTHOR("Henrik Rydberg"); MODULE_DESCRIPTION("Apple USB BCM5974 multitouch driver"); MODULE_LICENSE("GPL"); #define dprintk(level, format, a...)\ { if (debug >= level) printk(KERN_DEBUG format, ##a); } static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Activate debugging output"); /* button data structure */ struct bt_data { u8 unknown1; /* constant */ u8 button; /* left button */ u8 rel_x; /* relative x coordinate */ u8 rel_y; /* relative y coordinate */ }; /* trackpad header types */ enum tp_type { TYPE1, /* plain trackpad */ TYPE2 /* button integrated in trackpad */ }; /* trackpad finger data offsets, le16-aligned */ #define FINGER_TYPE1 (13 * sizeof(__le16)) #define FINGER_TYPE2 (15 * sizeof(__le16)) /* trackpad button data offsets */ #define BUTTON_TYPE2 15 /* list of device capability bits */ #define HAS_INTEGRATED_BUTTON 1 /* trackpad finger structure, le16-aligned */ struct tp_finger { __le16 origin; /* zero when 
switching track finger */ __le16 abs_x; /* absolute x coodinate */ __le16 abs_y; /* absolute y coodinate */ __le16 rel_x; /* relative x coodinate */ __le16 rel_y; /* relative y coodinate */ __le16 size_major; /* finger size, major axis? */ __le16 size_minor; /* finger size, minor axis? */ __le16 orientation; /* 16384 when point, else 15 bit angle */ __le16 force_major; /* trackpad force, major axis? */ __le16 force_minor; /* trackpad force, minor axis? */ __le16 unused[3]; /* zeros */ __le16 multi; /* one finger: varies, more fingers: constant */ } __attribute__((packed,aligned(2))); /* trackpad finger data size, empirically at least ten fingers */ #define SIZEOF_FINGER sizeof(struct tp_finger) #define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER) #define MAX_FINGER_ORIENTATION 16384 /* device-specific parameters */ struct bcm5974_param { int dim; /* logical dimension */ int fuzz; /* logical noise value */ int devmin; /* device minimum reading */ int devmax; /* device maximum reading */ }; /* device-specific configuration */ struct bcm5974_config { int ansi, iso, jis; /* the product id of this device */ int caps; /* device capability bitmask */ int bt_ep; /* the endpoint of the button interface */ int bt_datalen; /* data length of the button interface */ int tp_ep; /* the endpoint of the trackpad interface */ enum tp_type tp_type; /* type of trackpad interface */ int tp_offset; /* offset to trackpad finger data */ int tp_datalen; /* data length of the trackpad interface */ struct bcm5974_param p; /* finger pressure limits */ struct bcm5974_param w; /* finger width limits */ struct bcm5974_param x; /* horizontal limits */ struct bcm5974_param y; /* vertical limits */ }; /* logical device structure */ struct bcm5974 { char phys[64]; struct usb_device *udev; /* usb device */ struct usb_interface *intf; /* our interface */ struct input_dev *input; /* input dev */ struct bcm5974_config cfg; /* device configuration */ struct mutex pm_mutex; /* serialize access to open/suspend 
*/ int opened; /* 1: opened, 0: closed */ struct urb *bt_urb; /* button usb request block */ struct bt_data *bt_data; /* button transferred data */ struct urb *tp_urb; /* trackpad usb request block */ u8 *tp_data; /* trackpad transferred data */ int fingers; /* number of fingers on trackpad */ }; /* logical dimensions */ #define DIM_PRESSURE 256 /* maximum finger pressure */ #define DIM_WIDTH 16 /* maximum finger width */ #define DIM_X 1280 /* maximum trackpad x value */ #define DIM_Y 800 /* maximum trackpad y value */ /* logical signal quality */ #define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ #define SN_WIDTH 100 /* width signal-to-noise ratio */ #define SN_COORD 250 /* coordinate signal-to-noise ratio */ /* pressure thresholds */ #define PRESSURE_LOW (2 * DIM_PRESSURE / SN_PRESSURE) #define PRESSURE_HIGH (3 * PRESSURE_LOW) /* device constants */ static const struct bcm5974_config bcm5974_config_table[] = { { USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, 0, 0x84, sizeof(struct bt_data), 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4824, 5342 }, { DIM_Y, DIM_Y / SN_COORD, -172, 5820 } }, { USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, 0, 0x84, sizeof(struct bt_data), 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4824, 4824 }, { DIM_Y, DIM_Y / SN_COORD, -172, 4290 } }, { USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 
}, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4460, 5166 }, { DIM_Y, DIM_Y / SN_COORD, -75, 6700 } }, { USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } }, { USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4616, 5112 }, { DIM_Y, DIM_Y / SN_COORD, -142, 5234 } }, { USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } }, {} }; /* return the device-specific configuration by device */ static const struct bcm5974_config *bcm5974_get_config(struct usb_device *udev) { u16 id = le16_to_cpu(udev->descriptor.idProduct); const struct bcm5974_config *cfg; for (cfg = bcm5974_config_table; cfg->ansi; ++cfg) if (cfg->ansi == id || cfg->iso == id || cfg->jis == id) return cfg; return bcm5974_config_table; } /* convert 16-bit little endian to signed integer */ static inline int raw2int(__le16 x) { return (signed short)le16_to_cpu(x); } /* scale device data to logical dimensions (asserts devmin < devmax) */ static inline int int2scale(const struct 
bcm5974_param *p, int x) { return x * p->dim / (p->devmax - p->devmin); } /* all logical value ranges are [0,dim). */ static inline int int2bound(const struct bcm5974_param *p, int x) { int s = int2scale(p, x); return clamp_val(s, 0, p->dim - 1); } /* setup which logical events to report */ static void setup_events_to_report(struct input_dev *input_dev, const struct bcm5974_config *cfg) { __set_bit(EV_ABS, input_dev->evbit); input_set_abs_params(input_dev, ABS_PRESSURE, 0, cfg->p.dim, cfg->p.fuzz, 0); input_set_abs_params(input_dev, ABS_TOOL_WIDTH, 0, cfg->w.dim, cfg->w.fuzz, 0); input_set_abs_params(input_dev, ABS_X, 0, cfg->x.dim, cfg->x.fuzz, 0); input_set_abs_params(input_dev, ABS_Y, 0, cfg->y.dim, cfg->y.fuzz, 0); /* finger touch area */ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, cfg->w.devmin, cfg->w.devmax, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, cfg->w.devmin, cfg->w.devmax, 0, 0); /* finger approach area */ input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, cfg->w.devmin, cfg->w.devmax, 0, 0); input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, cfg->w.devmin, cfg->w.devmax, 0, 0); /* finger orientation */ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION, 0, 0); /* finger position */ input_set_abs_params(input_dev, ABS_MT_POSITION_X, cfg->x.devmin, cfg->x.devmax, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, cfg->y.devmin, cfg->y.devmax, 0, 0); __set_bit(EV_KEY, input_dev->evbit); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(BTN_TOOL_FINGER, input_dev->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); __set_bit(BTN_LEFT, input_dev->keybit); if (cfg->caps & HAS_INTEGRATED_BUTTON) __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); input_set_events_per_packet(input_dev, 60); } /* report button data as logical button state */ static int 
report_bt_state(struct bcm5974 *dev, int size) { if (size != sizeof(struct bt_data)) return -EIO; dprintk(7, "bcm5974: button data: %x %x %x %x\n", dev->bt_data->unknown1, dev->bt_data->button, dev->bt_data->rel_x, dev->bt_data->rel_y); input_report_key(dev->input, BTN_LEFT, dev->bt_data->button); input_sync(dev->input); return 0; } static void report_finger_data(struct input_dev *input, const struct bcm5974_config *cfg, const struct tp_finger *f) { input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major) << 1); input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor) << 1); input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major) << 1); input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor) << 1); input_report_abs(input, ABS_MT_ORIENTATION, MAX_FINGER_ORIENTATION - raw2int(f->orientation)); input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); input_report_abs(input, ABS_MT_POSITION_Y, cfg->y.devmin + cfg->y.devmax - raw2int(f->abs_y)); input_mt_sync(input); } /* report trackpad data as logical trackpad state */ static int report_tp_state(struct bcm5974 *dev, int size) { const struct bcm5974_config *c = &dev->cfg; const struct tp_finger *f; struct input_dev *input = dev->input; int raw_p, raw_w, raw_x, raw_y, raw_n, i; int ptest, origin, ibt = 0, nmin = 0, nmax = 0; int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0; if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) return -EIO; /* finger data, le16-aligned */ f = (const struct tp_finger *)(dev->tp_data + c->tp_offset); raw_n = (size - c->tp_offset) / SIZEOF_FINGER; /* always track the first finger; when detached, start over */ if (raw_n) { /* report raw trackpad data */ for (i = 0; i < raw_n; i++) report_finger_data(input, c, &f[i]); raw_p = raw2int(f->force_major); raw_w = raw2int(f->size_major); raw_x = raw2int(f->abs_x); raw_y = raw2int(f->abs_y); dprintk(9, "bcm5974: " "raw: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n", raw_p, raw_w, raw_x, 
raw_y, raw_n); ptest = int2bound(&c->p, raw_p); origin = raw2int(f->origin); /* while tracking finger still valid, count all fingers */ if (ptest > PRESSURE_LOW && origin) { abs_p = ptest; abs_w = int2bound(&c->w, raw_w); abs_x = int2bound(&c->x, raw_x - c->x.devmin); abs_y = int2bound(&c->y, c->y.devmax - raw_y); while (raw_n--) { ptest = int2bound(&c->p, raw2int(f->force_major)); if (ptest > PRESSURE_LOW) nmax++; if (ptest > PRESSURE_HIGH) nmin++; f++; } } } /* set the integrated button if applicable */ if (c->tp_type == TYPE2) ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); if (dev->fingers < nmin) dev->fingers = nmin; if (dev->fingers > nmax) dev->fingers = nmax; input_report_key(input, BTN_TOUCH, dev->fingers > 0); input_report_key(input, BTN_TOOL_FINGER, dev->fingers == 1); input_report_key(input, BTN_TOOL_DOUBLETAP, dev->fingers == 2); input_report_key(input, BTN_TOOL_TRIPLETAP, dev->fingers == 3); input_report_key(input, BTN_TOOL_QUADTAP, dev->fingers > 3); input_report_abs(input, ABS_PRESSURE, abs_p); input_report_abs(input, ABS_TOOL_WIDTH, abs_w); if (abs_p) { input_report_abs(input, ABS_X, abs_x); input_report_abs(input, ABS_Y, abs_y); dprintk(8, "bcm5974: abs: p: %+05d w: %+05d x: %+05d y: %+05d " "nmin: %d nmax: %d n: %d ibt: %d\n", abs_p, abs_w, abs_x, abs_y, nmin, nmax, dev->fingers, ibt); } /* type 2 reports button events via ibt only */ if (c->tp_type == TYPE2) input_report_key(input, BTN_LEFT, ibt); input_sync(input); return 0; } /* Wellspring initialization constants */ #define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1 #define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9 #define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 #define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 #define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 #define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08 static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) { char *data = kmalloc(8, GFP_KERNEL); int retval = 0, size; if (!data) { err("bcm5974: out of memory"); retval = -ENOMEM; 
goto out; } /* read configuration */ size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, BCM5974_WELLSPRING_MODE_REQUEST_VALUE, BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); if (size != 8) { err("bcm5974: could not read from device"); retval = -EIO; goto out; } /* apply the mode switch */ data[0] = on ? BCM5974_WELLSPRING_MODE_VENDOR_VALUE : BCM5974_WELLSPRING_MODE_NORMAL_VALUE; /* write configuration */ size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, BCM5974_WELLSPRING_MODE_REQUEST_VALUE, BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); if (size != 8) { err("bcm5974: could not write to device"); retval = -EIO; goto out; } dprintk(2, "bcm5974: switched to %s mode.\n", on ? "wellspring" : "normal"); out: kfree(data); return retval; } static void bcm5974_irq_button(struct urb *urb) { struct bcm5974 *dev = urb->context; int error; switch (urb->status) { case 0: break; case -EOVERFLOW: case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dbg("bcm5974: button urb shutting down: %d", urb->status); return; default: dbg("bcm5974: button urb status: %d", urb->status); goto exit; } if (report_bt_state(dev, dev->bt_urb->actual_length)) dprintk(1, "bcm5974: bad button package, length: %d\n", dev->bt_urb->actual_length); exit: error = usb_submit_urb(dev->bt_urb, GFP_ATOMIC); if (error) err("bcm5974: button urb failed: %d", error); } static void bcm5974_irq_trackpad(struct urb *urb) { struct bcm5974 *dev = urb->context; int error; switch (urb->status) { case 0: break; case -EOVERFLOW: case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: dbg("bcm5974: trackpad urb shutting down: %d", urb->status); return; default: dbg("bcm5974: trackpad urb status: %d", urb->status); goto exit; } /* control response ignored */ if (dev->tp_urb->actual_length == 2) goto exit; if 
(report_tp_state(dev, dev->tp_urb->actual_length)) dprintk(1, "bcm5974: bad trackpad package, length: %d\n", dev->tp_urb->actual_length); exit: error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC); if (error) err("bcm5974: trackpad urb failed: %d", error); } /* * The Wellspring trackpad, like many recent Apple trackpads, share * the usb device with the keyboard. Since keyboards are usually * handled by the HID system, the device ends up being handled by two * modules. Setting up the device therefore becomes slightly * complicated. To enable multitouch features, a mode switch is * required, which is usually applied via the control interface of the * device. It can be argued where this switch should take place. In * some drivers, like appletouch, the switch is made during * probe. However, the hid module may also alter the state of the * device, resulting in trackpad malfunction under certain * circumstances. To get around this problem, there is at least one * example that utilizes the USB_QUIRK_RESET_RESUME quirk in order to * receive a reset_resume request rather than the normal resume. * Since the implementation of reset_resume is equal to mode switch * plus start_traffic, it seems easier to always do the switch when * starting traffic on the device. */ static int bcm5974_start_traffic(struct bcm5974 *dev) { int error; error = bcm5974_wellspring_mode(dev, true); if (error) { dprintk(1, "bcm5974: mode switch failed\n"); goto err_out; } error = usb_submit_urb(dev->bt_urb, GFP_KERNEL); if (error) goto err_reset_mode; error = usb_submit_urb(dev->tp_urb, GFP_KERNEL); if (error) goto err_kill_bt; return 0; err_kill_bt: usb_kill_urb(dev->bt_urb); err_reset_mode: bcm5974_wellspring_mode(dev, false); err_out: return error; } static void bcm5974_pause_traffic(struct bcm5974 *dev) { usb_kill_urb(dev->tp_urb); usb_kill_urb(dev->bt_urb); bcm5974_wellspring_mode(dev, false); } /* * The code below implements open/close and manual suspend/resume. 
* All functions may be called in random order. * * Opening a suspended device fails with EACCES - permission denied. * * Failing a resume leaves the device resumed but closed. */ static int bcm5974_open(struct input_dev *input) { struct bcm5974 *dev = input_get_drvdata(input); int error; error = usb_autopm_get_interface(dev->intf); if (error) return error; mutex_lock(&dev->pm_mutex); error = bcm5974_start_traffic(dev); if (!error) dev->opened = 1; mutex_unlock(&dev->pm_mutex); if (error) usb_autopm_put_interface(dev->intf); return error; } static void bcm5974_close(struct input_dev *input) { struct bcm5974 *dev = input_get_drvdata(input); mutex_lock(&dev->pm_mutex); bcm5974_pause_traffic(dev); dev->opened = 0; mutex_unlock(&dev->pm_mutex); usb_autopm_put_interface(dev->intf); } static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message) { struct bcm5974 *dev = usb_get_intfdata(iface); mutex_lock(&dev->pm_mutex); if (dev->opened) bcm5974_pause_traffic(dev); mutex_unlock(&dev->pm_mutex); return 0; } static int bcm5974_resume(struct usb_interface *iface) { struct bcm5974 *dev = usb_get_intfdata(iface); int error = 0; mutex_lock(&dev->pm_mutex); if (dev->opened) error = bcm5974_start_traffic(dev); mutex_unlock(&dev->pm_mutex); return error; } static int bcm5974_probe(struct usb_interface *iface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(iface); const struct bcm5974_config *cfg; struct bcm5974 *dev; struct input_dev *input_dev; int error = -ENOMEM; /* find the product index */ cfg = bcm5974_get_config(udev); /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL); input_dev = input_allocate_device(); if (!dev || !input_dev) { err("bcm5974: out of memory"); goto err_free_devs; } dev->udev = udev; dev->intf = iface; dev->input = input_dev; dev->cfg = *cfg; mutex_init(&dev->pm_mutex); /* setup urbs */ dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->bt_urb) 
goto err_free_devs; dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->tp_urb) goto err_free_bt_urb; dev->bt_data = usb_alloc_coherent(dev->udev, dev->cfg.bt_datalen, GFP_KERNEL, &dev->bt_urb->transfer_dma); if (!dev->bt_data) goto err_free_urb; dev->tp_data = usb_alloc_coherent(dev->udev, dev->cfg.tp_datalen, GFP_KERNEL, &dev->tp_urb->transfer_dma); if (!dev->tp_data) goto err_free_bt_buffer; usb_fill_int_urb(dev->bt_urb, udev, usb_rcvintpipe(udev, cfg->bt_ep), dev->bt_data, dev->cfg.bt_datalen, bcm5974_irq_button, dev, 1); usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), dev->tp_data, dev->cfg.tp_datalen, bcm5974_irq_trackpad, dev, 1); /* create bcm5974 device */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); input_dev->name = "bcm5974"; input_dev->phys = dev->phys; usb_to_input_id(dev->udev, &input_dev->id); /* report driver capabilities via the version field */ input_dev->id.version = cfg->caps; input_dev->dev.parent = &iface->dev; input_set_drvdata(input_dev, dev); input_dev->open = bcm5974_open; input_dev->close = bcm5974_close; setup_events_to_report(input_dev, cfg); error = input_register_device(dev->input); if (error) goto err_free_buffer; /* save our data pointer in this interface device */ usb_set_intfdata(iface, dev); return 0; err_free_buffer: usb_free_coherent(dev->udev, dev->cfg.tp_datalen, dev->tp_data, dev->tp_urb->transfer_dma); err_free_bt_buffer: usb_free_coherent(dev->udev, dev->cfg.bt_datalen, dev->bt_data, dev->bt_urb->transfer_dma); err_free_urb: usb_free_urb(dev->tp_urb); err_free_bt_urb: usb_free_urb(dev->bt_urb); err_free_devs: usb_set_intfdata(iface, NULL); input_free_device(input_dev); kfree(dev); return error; } static void bcm5974_disconnect(struct usb_interface *iface) { struct bcm5974 *dev = usb_get_intfdata(iface); usb_set_intfdata(iface, NULL); input_unregister_device(dev->input); usb_free_coherent(dev->udev, dev->cfg.tp_datalen, dev->tp_data, 
dev->tp_urb->transfer_dma); usb_free_coherent(dev->udev, dev->cfg.bt_datalen, dev->bt_data, dev->bt_urb->transfer_dma); usb_free_urb(dev->tp_urb); usb_free_urb(dev->bt_urb); kfree(dev); } static struct usb_driver bcm5974_driver = { .name = "bcm5974", .probe = bcm5974_probe, .disconnect = bcm5974_disconnect, .suspend = bcm5974_suspend, .resume = bcm5974_resume, .id_table = bcm5974_table, .supports_autosuspend = 1, }; static int __init bcm5974_init(void) { return usb_register(&bcm5974_driver); } static void __exit bcm5974_exit(void) { usb_deregister(&bcm5974_driver); } module_init(bcm5974_init); module_exit(bcm5974_exit);
gpl-2.0
OPTICM/android_kernel_samsung_msm8660-common
arch/arm/mach-pxa/cm-x255.c
2916
4941
/* * linux/arch/arm/mach-pxa/cm-x255.c * * Copyright (C) 2007, 2008 CompuLab, Ltd. * Mike Rapoport <mike@compulab.co.il> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/mtd/nand-gpio.h> #include <linux/spi/spi.h> #include <linux/spi/pxa2xx_spi.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include <asm/mach/map.h> #include <mach/pxa25x.h> #include "generic.h" #define GPIO_NAND_CS (5) #define GPIO_NAND_ALE (4) #define GPIO_NAND_CLE (3) #define GPIO_NAND_RB (10) static unsigned long cmx255_pin_config[] = { /* AC'97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, /* BTUART */ GPIO42_BTUART_RXD, GPIO43_BTUART_TXD, GPIO44_BTUART_CTS, GPIO45_BTUART_RTS, /* STUART */ GPIO46_STUART_RXD, GPIO47_STUART_TXD, /* LCD */ GPIOxx_LCD_TFT_16BPP, /* SSP1 */ GPIO23_SSP1_SCLK, GPIO24_SSP1_SFRM, GPIO25_SSP1_TXD, GPIO26_SSP1_RXD, /* SSP2 */ GPIO81_SSP2_CLK_OUT, GPIO82_SSP2_FRM_OUT, GPIO83_SSP2_TXD, GPIO84_SSP2_RXD, /* PC Card */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO52_nPCE_1, GPIO53_nPCE_2, GPIO54_nPSKTSEL, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, /* SDRAM and local bus */ GPIO15_nCS_1, GPIO78_nCS_2, GPIO79_nCS_3, GPIO80_nCS_4, GPIO33_nCS_5, GPIO18_RDY, /* GPIO */ GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH, GPIO9_GPIO, /* PC card reset */ /* NAND controls */ GPIO5_GPIO | MFP_LPM_DRIVE_HIGH, /* NAND CE# */ GPIO4_GPIO | MFP_LPM_DRIVE_LOW, /* NAND ALE */ GPIO3_GPIO | MFP_LPM_DRIVE_LOW, /* NAND CLE */ GPIO10_GPIO, /* NAND Ready/Busy */ /* interrupts */ GPIO22_GPIO, /* DM9000 interrupt */ }; #if defined(CONFIG_SPI_PXA2XX) static struct pxa2xx_spi_master pxa_ssp_master_info = { .num_chipselect = 1, }; static struct 
spi_board_info spi_board_info[] __initdata = { [0] = { .modalias = "rtc-max6902", .max_speed_hz = 1000000, .bus_num = 1, .chip_select = 0, }, }; static void __init cmx255_init_rtc(void) { pxa2xx_set_spi_info(1, &pxa_ssp_master_info); spi_register_board_info(ARRAY_AND_SIZE(spi_board_info)); } #else static inline void cmx255_init_rtc(void) {} #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition cmx255_nor_partitions[] = { { .name = "ARMmon", .size = 0x00030000, .offset = 0, .mask_flags = MTD_WRITEABLE /* force read-only */ } , { .name = "ARMmon setup block", .size = 0x00010000, .offset = MTDPART_OFS_APPEND, .mask_flags = MTD_WRITEABLE /* force read-only */ } , { .name = "kernel", .size = 0x00160000, .offset = MTDPART_OFS_APPEND, } , { .name = "ramdisk", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND } }; static struct physmap_flash_data cmx255_nor_flash_data[] = { { .width = 2, /* bankwidth in bytes */ .parts = cmx255_nor_partitions, .nr_parts = ARRAY_SIZE(cmx255_nor_partitions) } }; static struct resource cmx255_nor_resource = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_8M - 1, .flags = IORESOURCE_MEM, }; static struct platform_device cmx255_nor = { .name = "physmap-flash", .id = -1, .dev = { .platform_data = cmx255_nor_flash_data, }, .resource = &cmx255_nor_resource, .num_resources = 1, }; static void __init cmx255_init_nor(void) { platform_device_register(&cmx255_nor); } #else static inline void cmx255_init_nor(void) {} #endif #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE) static struct resource cmx255_nand_resource[] = { [0] = { .start = PXA_CS1_PHYS, .end = PXA_CS1_PHYS + 11, .flags = IORESOURCE_MEM, }, [1] = { .start = PXA_CS5_PHYS, .end = PXA_CS5_PHYS + 3, .flags = IORESOURCE_MEM, }, }; static struct mtd_partition cmx255_nand_parts[] = { [0] = { .name = "cmx255-nand", .size = MTDPART_SIZ_FULL, .offset = 0, }, }; static struct gpio_nand_platdata cmx255_nand_platdata = { 
.gpio_nce = GPIO_NAND_CS, .gpio_cle = GPIO_NAND_CLE, .gpio_ale = GPIO_NAND_ALE, .gpio_rdy = GPIO_NAND_RB, .gpio_nwp = -1, .parts = cmx255_nand_parts, .num_parts = ARRAY_SIZE(cmx255_nand_parts), .chip_delay = 25, }; static struct platform_device cmx255_nand = { .name = "gpio-nand", .num_resources = ARRAY_SIZE(cmx255_nand_resource), .resource = cmx255_nand_resource, .id = -1, .dev = { .platform_data = &cmx255_nand_platdata, } }; static void __init cmx255_init_nand(void) { platform_device_register(&cmx255_nand); } #else static inline void cmx255_init_nand(void) {} #endif void __init cmx255_init(void) { pxa2xx_mfp_config(ARRAY_AND_SIZE(cmx255_pin_config)); cmx255_init_rtc(); cmx255_init_nor(); cmx255_init_nand(); }
gpl-2.0
naota/hfsplus
arch/arm/mach-kirkwood/cpuidle.c
4452
2818
/* * arch/arm/mach-kirkwood/cpuidle.c * * CPU idle Marvell Kirkwood SoCs * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. * * The cpu idle uses wait-for-interrupt and DDR self refresh in order * to implement two idle states - * #1 wait-for-interrupt * #2 wait-for-interrupt and DDR self refresh */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/cpuidle.h> #include <linux/io.h> #include <asm/proc-fns.h> #include <mach/kirkwood.h> #define KIRKWOOD_MAX_STATES 2 static struct cpuidle_driver kirkwood_idle_driver = { .name = "kirkwood_idle", .owner = THIS_MODULE, }; static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device); /* Actual code that puts the SoC in different idle states */ static int kirkwood_enter_idle(struct cpuidle_device *dev, struct cpuidle_state *state) { struct timeval before, after; int idle_time; local_irq_disable(); do_gettimeofday(&before); if (state == &dev->states[0]) /* Wait for interrupt state */ cpu_do_idle(); else if (state == &dev->states[1]) { /* * Following write will put DDR in self refresh. * Note that we have 256 cycles before DDR puts it * self in self-refresh, so the wait-for-interrupt * call afterwards won't get the DDR from self refresh * mode. 
*/ writel(0x7, DDR_OPERATION_BASE); cpu_do_idle(); } do_gettimeofday(&after); local_irq_enable(); idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC + (after.tv_usec - before.tv_usec); return idle_time; } /* Initialize CPU idle by registering the idle states */ static int kirkwood_init_cpuidle(void) { struct cpuidle_device *device; cpuidle_register_driver(&kirkwood_idle_driver); device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); device->state_count = KIRKWOOD_MAX_STATES; /* Wait for interrupt state */ device->states[0].enter = kirkwood_enter_idle; device->states[0].exit_latency = 1; device->states[0].target_residency = 10000; device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; strcpy(device->states[0].name, "WFI"); strcpy(device->states[0].desc, "Wait for interrupt"); /* Wait for interrupt and DDR self refresh state */ device->states[1].enter = kirkwood_enter_idle; device->states[1].exit_latency = 10; device->states[1].target_residency = 10000; device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; strcpy(device->states[1].name, "DDR SR"); strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); if (cpuidle_register_device(device)) { printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); return -EIO; } return 0; } device_initcall(kirkwood_init_cpuidle);
gpl-2.0
s0627js/android_kernel_SHV-E300S
arch/arm/mach-omap2/prm_common.c
4708
8820
/* * OMAP2+ common Power & Reset Management (PRM) IP block functions * * Copyright (C) 2011 Texas Instruments, Inc. * Tero Kristo <t-kristo@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * * For historical purposes, the API used to configure the PRM * interrupt handler refers to it as the "PRCM interrupt." The * underlying registers are located in the PRM on OMAP3/4. * * XXX This code should eventually be moved to a PRM driver. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <plat/common.h> #include <plat/prcm.h> #include <plat/irqs.h> #include "prm2xxx_3xxx.h" #include "prm44xx.h" /* * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs * XXX this is technically not needed, since * omap_prcm_register_chain_handler() could allocate this based on the * actual amount of memory needed for the SoC */ #define OMAP_PRCM_MAX_NR_PENDING_REG 2 /* * prcm_irq_chips: an array of all of the "generic IRQ chips" in use * by the PRCM interrupt handler code. There will be one 'chip' per * PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have * one "chip" and OMAP4 will have two.) */ static struct irq_chip_generic **prcm_irq_chips; /* * prcm_irq_setup: the PRCM IRQ parameters for the hardware the code * is currently running on. Defined and passed by initialization code * that calls omap_prcm_register_chain_handler(). 
*/ static struct omap_prcm_irq_setup *prcm_irq_setup; /* Private functions */ /* * Move priority events from events to priority_events array */ static void omap_prcm_events_filter_priority(unsigned long *events, unsigned long *priority_events) { int i; for (i = 0; i < prcm_irq_setup->nr_regs; i++) { priority_events[i] = events[i] & prcm_irq_setup->priority_mask[i]; events[i] ^= priority_events[i]; } } /* * PRCM Interrupt Handler * * This is a common handler for the OMAP PRCM interrupts. Pending * interrupts are detected by a call to prcm_pending_events and * dispatched accordingly. Clearing of the wakeup events should be * done by the SoC specific individual handlers. */ static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG]; unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG]; struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int virtirq; int nr_irqs = prcm_irq_setup->nr_regs * 32; /* * If we are suspended, mask all interrupts from PRCM level, * this does not ack them, and they will be pending until we * re-enable the interrupts, at which point the * omap_prcm_irq_handler will be executed again. The * _save_and_clear_irqen() function must ensure that the PRM * write to disable all IRQs has reached the PRM before * returning, or spurious PRCM interrupts may occur during * suspend. 
*/ if (prcm_irq_setup->suspended) { prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask); prcm_irq_setup->suspend_save_flag = true; } /* * Loop until all pending irqs are handled, since * generic_handle_irq() can cause new irqs to come */ while (!prcm_irq_setup->suspended) { prcm_irq_setup->read_pending_irqs(pending); /* No bit set, then all IRQs are handled */ if (find_first_bit(pending, nr_irqs) >= nr_irqs) break; omap_prcm_events_filter_priority(pending, priority_pending); /* * Loop on all currently pending irqs so that new irqs * cannot starve previously pending irqs */ /* Serve priority events first */ for_each_set_bit(virtirq, priority_pending, nr_irqs) generic_handle_irq(prcm_irq_setup->base_irq + virtirq); /* Serve normal events next */ for_each_set_bit(virtirq, pending, nr_irqs) generic_handle_irq(prcm_irq_setup->base_irq + virtirq); } if (chip->irq_ack) chip->irq_ack(&desc->irq_data); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); chip->irq_unmask(&desc->irq_data); prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */ } /* Public functions */ /** * omap_prcm_event_to_irq - given a PRCM event name, returns the * corresponding IRQ on which the handler should be registered * @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq * * Returns the Linux internal IRQ ID corresponding to @name upon success, * or -ENOENT upon failure. */ int omap_prcm_event_to_irq(const char *name) { int i; if (!prcm_irq_setup || !name) return -ENOENT; for (i = 0; i < prcm_irq_setup->nr_irqs; i++) if (!strcmp(prcm_irq_setup->irqs[i].name, name)) return prcm_irq_setup->base_irq + prcm_irq_setup->irqs[i].offset; return -ENOENT; } /** * omap_prcm_irq_cleanup - reverses memory allocated and other steps * done by omap_prcm_register_chain_handler() * * No return value. 
*/ void omap_prcm_irq_cleanup(void) { int i; if (!prcm_irq_setup) { pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n"); return; } if (prcm_irq_chips) { for (i = 0; i < prcm_irq_setup->nr_regs; i++) { if (prcm_irq_chips[i]) irq_remove_generic_chip(prcm_irq_chips[i], 0xffffffff, 0, 0); prcm_irq_chips[i] = NULL; } kfree(prcm_irq_chips); prcm_irq_chips = NULL; } kfree(prcm_irq_setup->saved_mask); prcm_irq_setup->saved_mask = NULL; kfree(prcm_irq_setup->priority_mask); prcm_irq_setup->priority_mask = NULL; irq_set_chained_handler(prcm_irq_setup->irq, NULL); if (prcm_irq_setup->base_irq > 0) irq_free_descs(prcm_irq_setup->base_irq, prcm_irq_setup->nr_regs * 32); prcm_irq_setup->base_irq = 0; } void omap_prcm_irq_prepare(void) { prcm_irq_setup->suspended = true; } void omap_prcm_irq_complete(void) { prcm_irq_setup->suspended = false; /* If we have not saved the masks, do not attempt to restore */ if (!prcm_irq_setup->suspend_save_flag) return; prcm_irq_setup->suspend_save_flag = false; /* * Re-enable all masked PRCM irq sources, this causes the PRCM * interrupt to fire immediately if the events were masked * previously in the chain handler */ prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask); } /** * omap_prcm_register_chain_handler - initializes the prcm chained interrupt * handler based on provided parameters * @irq_setup: hardware data about the underlying PRM/PRCM * * Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up * one generic IRQ chip per PRM interrupt status/enable register pair. * Returns 0 upon success, -EINVAL if called twice or if invalid * arguments are passed, or -ENOMEM on any other error. 
*/ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup) { int nr_regs = irq_setup->nr_regs; u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG]; int offset, i; struct irq_chip_generic *gc; struct irq_chip_type *ct; if (!irq_setup) return -EINVAL; if (prcm_irq_setup) { pr_err("PRCM: already initialized; won't reinitialize\n"); return -EINVAL; } if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) { pr_err("PRCM: nr_regs too large\n"); return -EINVAL; } prcm_irq_setup = irq_setup; prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL); prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL); prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL); if (!prcm_irq_chips || !prcm_irq_setup->saved_mask || !prcm_irq_setup->priority_mask) { pr_err("PRCM: kzalloc failed\n"); goto err; } memset(mask, 0, sizeof(mask)); for (i = 0; i < irq_setup->nr_irqs; i++) { offset = irq_setup->irqs[i].offset; mask[offset >> 5] |= 1 << (offset & 0x1f); if (irq_setup->irqs[i].priority) irq_setup->priority_mask[offset >> 5] |= 1 << (offset & 0x1f); } irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler); irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32, 0); if (irq_setup->base_irq < 0) { pr_err("PRCM: failed to allocate irq descs: %d\n", irq_setup->base_irq); goto err; } for (i = 0; i < irq_setup->nr_regs; i++) { gc = irq_alloc_generic_chip("PRCM", 1, irq_setup->base_irq + i * 32, prm_base, handle_level_irq); if (!gc) { pr_err("PRCM: failed to allocate generic chip\n"); goto err; } ct = gc->chip_types; ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->regs.ack = irq_setup->ack + i * 4; ct->regs.mask = irq_setup->mask + i * 4; irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0); prcm_irq_chips[i] = gc; } return 0; err: omap_prcm_irq_cleanup(); return -ENOMEM; }
gpl-2.0
regalstreak/android_kernel_samsung_logan2g
drivers/staging/iio/accel/adis16240_core.c
4964
14685
/* * ADIS16240 Programmable Impact Sensor and Recorder driver * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/module.h> #include "../iio.h" #include "../sysfs.h" #include "../buffer.h" #include "adis16240.h" #define DRIVER_NAME "adis16240" static int adis16240_check_status(struct iio_dev *indio_dev); /** * adis16240_spi_write_reg_8() - write single byte to a register * @indio_dev: iio_dev associated with device * @reg_address: the address of the register to be written * @val: the value to write **/ static int adis16240_spi_write_reg_8(struct iio_dev *indio_dev, u8 reg_address, u8 val) { int ret; struct adis16240_state *st = iio_priv(indio_dev); mutex_lock(&st->buf_lock); st->tx[0] = ADIS16240_WRITE_REG(reg_address); st->tx[1] = val; ret = spi_write(st->us, st->tx, 2); mutex_unlock(&st->buf_lock); return ret; } /** * adis16240_spi_write_reg_16() - write 2 bytes to a pair of registers * @indio_dev: iio_dev for this device * @reg_address: the address of the lower of the two registers. Second register * is assumed to have address one greater. 
* @val: value to be written **/ static int adis16240_spi_write_reg_16(struct iio_dev *indio_dev, u8 lower_reg_address, u16 value) { int ret; struct spi_message msg; struct adis16240_state *st = iio_priv(indio_dev); struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = 35, }, { .tx_buf = st->tx + 2, .bits_per_word = 8, .len = 2, .delay_usecs = 35, }, }; mutex_lock(&st->buf_lock); st->tx[0] = ADIS16240_WRITE_REG(lower_reg_address); st->tx[1] = value & 0xFF; st->tx[2] = ADIS16240_WRITE_REG(lower_reg_address + 1); st->tx[3] = (value >> 8) & 0xFF; spi_message_init(&msg); spi_message_add_tail(&xfers[0], &msg); spi_message_add_tail(&xfers[1], &msg); ret = spi_sync(st->us, &msg); mutex_unlock(&st->buf_lock); return ret; } /** * adis16240_spi_read_reg_16() - read 2 bytes from a 16-bit register * @indio_dev: iio_dev for this device * @reg_address: the address of the lower of the two registers. Second register * is assumed to have address one greater. 
* @val: somewhere to pass back the value read **/ static int adis16240_spi_read_reg_16(struct iio_dev *indio_dev, u8 lower_reg_address, u16 *val) { struct spi_message msg; struct adis16240_state *st = iio_priv(indio_dev); int ret; struct spi_transfer xfers[] = { { .tx_buf = st->tx, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = 35, }, { .rx_buf = st->rx, .bits_per_word = 8, .len = 2, .cs_change = 1, .delay_usecs = 35, }, }; mutex_lock(&st->buf_lock); st->tx[0] = ADIS16240_READ_REG(lower_reg_address); st->tx[1] = 0; st->tx[2] = 0; st->tx[3] = 0; spi_message_init(&msg); spi_message_add_tail(&xfers[0], &msg); spi_message_add_tail(&xfers[1], &msg); ret = spi_sync(st->us, &msg); if (ret) { dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X", lower_reg_address); goto error_ret; } *val = (st->rx[0] << 8) | st->rx[1]; error_ret: mutex_unlock(&st->buf_lock); return ret; } static ssize_t adis16240_spi_read_signed(struct device *dev, struct device_attribute *attr, char *buf, unsigned bits) { struct iio_dev *indio_dev = dev_get_drvdata(dev); int ret; s16 val = 0; unsigned shift = 16 - bits; struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); ret = adis16240_spi_read_reg_16(indio_dev, this_attr->address, (u16 *)&val); if (ret) return ret; if (val & ADIS16240_ERROR_ACTIVE) adis16240_check_status(indio_dev); val = ((s16)(val << shift) >> shift); return sprintf(buf, "%d\n", val); } static ssize_t adis16240_read_12bit_signed(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret; struct iio_dev *indio_dev = dev_get_drvdata(dev); /* Take the iio_dev status lock */ mutex_lock(&indio_dev->mlock); ret = adis16240_spi_read_signed(dev, attr, buf, 12); mutex_unlock(&indio_dev->mlock); return ret; } static int adis16240_reset(struct iio_dev *indio_dev) { int ret; ret = adis16240_spi_write_reg_8(indio_dev, ADIS16240_GLOB_CMD, ADIS16240_GLOB_CMD_SW_RESET); if (ret) dev_err(&indio_dev->dev, "problem resetting device"); return ret; } 
static ssize_t adis16240_write_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_get_drvdata(dev); if (len < 1) return -EINVAL; switch (buf[0]) { case '1': case 'y': case 'Y': return adis16240_reset(indio_dev); } return -EINVAL; } int adis16240_set_irq(struct iio_dev *indio_dev, bool enable) { int ret = 0; u16 msc; ret = adis16240_spi_read_reg_16(indio_dev, ADIS16240_MSC_CTRL, &msc); if (ret) goto error_ret; msc |= ADIS16240_MSC_CTRL_ACTIVE_HIGH; msc &= ~ADIS16240_MSC_CTRL_DATA_RDY_DIO2; if (enable) msc |= ADIS16240_MSC_CTRL_DATA_RDY_EN; else msc &= ~ADIS16240_MSC_CTRL_DATA_RDY_EN; ret = adis16240_spi_write_reg_16(indio_dev, ADIS16240_MSC_CTRL, msc); error_ret: return ret; } static int adis16240_self_test(struct iio_dev *indio_dev) { int ret; ret = adis16240_spi_write_reg_16(indio_dev, ADIS16240_MSC_CTRL, ADIS16240_MSC_CTRL_SELF_TEST_EN); if (ret) { dev_err(&indio_dev->dev, "problem starting self test"); goto err_ret; } msleep(ADIS16240_STARTUP_DELAY); adis16240_check_status(indio_dev); err_ret: return ret; } static int adis16240_check_status(struct iio_dev *indio_dev) { u16 status; int ret; struct device *dev = &indio_dev->dev; ret = adis16240_spi_read_reg_16(indio_dev, ADIS16240_DIAG_STAT, &status); if (ret < 0) { dev_err(dev, "Reading status failed\n"); goto error_ret; } ret = status & 0x2F; if (status & ADIS16240_DIAG_STAT_PWRON_FAIL) dev_err(dev, "Power-on, self-test fail\n"); if (status & ADIS16240_DIAG_STAT_SPI_FAIL) dev_err(dev, "SPI failure\n"); if (status & ADIS16240_DIAG_STAT_FLASH_UPT) dev_err(dev, "Flash update failed\n"); if (status & ADIS16240_DIAG_STAT_POWER_HIGH) dev_err(dev, "Power supply above 3.625V\n"); if (status & ADIS16240_DIAG_STAT_POWER_LOW) dev_err(dev, "Power supply below 2.225V\n"); error_ret: return ret; } static int adis16240_initial_setup(struct iio_dev *indio_dev) { int ret; struct device *dev = &indio_dev->dev; /* Disable IRQ */ ret = adis16240_set_irq(indio_dev, 
false); if (ret) { dev_err(dev, "disable irq failed"); goto err_ret; } /* Do self test */ ret = adis16240_self_test(indio_dev); if (ret) { dev_err(dev, "self test failure"); goto err_ret; } /* Read status register to check the result */ ret = adis16240_check_status(indio_dev); if (ret) { adis16240_reset(indio_dev); dev_err(dev, "device not playing ball -> reset"); msleep(ADIS16240_STARTUP_DELAY); ret = adis16240_check_status(indio_dev); if (ret) { dev_err(dev, "giving up"); goto err_ret; } } err_ret: return ret; } static IIO_DEVICE_ATTR(in_accel_xyz_squared_peak_raw, S_IRUGO, adis16240_read_12bit_signed, NULL, ADIS16240_XYZPEAK_OUT); static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16240_write_reset, 0); static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("4096"); enum adis16240_chan { in_supply, in_aux, accel_x, accel_y, accel_z, temp, }; static const u8 adis16240_addresses[6][3] = { [in_supply] = { ADIS16240_SUPPLY_OUT }, [in_aux] = { ADIS16240_AUX_ADC }, [accel_x] = { ADIS16240_XACCL_OUT, ADIS16240_XACCL_OFF, ADIS16240_XPEAK_OUT }, [accel_y] = { ADIS16240_YACCL_OUT, ADIS16240_YACCL_OFF, ADIS16240_YPEAK_OUT }, [accel_z] = { ADIS16240_ZACCL_OUT, ADIS16240_ZACCL_OFF, ADIS16240_ZPEAK_OUT }, [temp] = { ADIS16240_TEMP_OUT }, }; static int adis16240_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { int ret; int bits; u8 addr; s16 val16; switch (mask) { case 0: mutex_lock(&indio_dev->mlock); addr = adis16240_addresses[chan->address][0]; ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16); if (ret) { mutex_unlock(&indio_dev->mlock); return ret; } if (val16 & ADIS16240_ERROR_ACTIVE) { ret = adis16240_check_status(indio_dev); if (ret) { mutex_unlock(&indio_dev->mlock); return ret; } } val16 = val16 & ((1 << chan->scan_type.realbits) - 1); if (chan->scan_type.sign == 's') val16 = (s16)(val16 << (16 - chan->scan_type.realbits)) >> (16 - chan->scan_type.realbits); *val = val16; mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT; 
case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_VOLTAGE: *val = 0; if (chan->channel == 0) *val2 = 4880; else return -EINVAL; return IIO_VAL_INT_PLUS_MICRO; case IIO_TEMP: *val = 0; *val2 = 244000; return IIO_VAL_INT_PLUS_MICRO; case IIO_ACCEL: *val = 0; *val2 = 504062; return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } break; case IIO_CHAN_INFO_PEAK_SCALE: *val = 6; *val2 = 629295; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_OFFSET: *val = 25; return IIO_VAL_INT; case IIO_CHAN_INFO_CALIBBIAS: bits = 10; mutex_lock(&indio_dev->mlock); addr = adis16240_addresses[chan->address][1]; ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16); if (ret) { mutex_unlock(&indio_dev->mlock); return ret; } val16 &= (1 << bits) - 1; val16 = (s16)(val16 << (16 - bits)) >> (16 - bits); *val = val16; mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT; case IIO_CHAN_INFO_PEAK: bits = 10; mutex_lock(&indio_dev->mlock); addr = adis16240_addresses[chan->address][2]; ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16); if (ret) { mutex_unlock(&indio_dev->mlock); return ret; } val16 &= (1 << bits) - 1; val16 = (s16)(val16 << (16 - bits)) >> (16 - bits); *val = val16; mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT; } return -EINVAL; } static int adis16240_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { int bits = 10; s16 val16; u8 addr; switch (mask) { case IIO_CHAN_INFO_CALIBBIAS: val16 = val & ((1 << bits) - 1); addr = adis16240_addresses[chan->address][1]; return adis16240_spi_write_reg_16(indio_dev, addr, val16); } return -EINVAL; } static struct iio_chan_spec adis16240_channels[] = { IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0, IIO_CHAN_INFO_SCALE_SEPARATE_BIT, in_supply, ADIS16240_SCAN_SUPPLY, IIO_ST('u', 10, 16, 0), 0), IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0, 0, in_aux, ADIS16240_SCAN_AUX_ADC, IIO_ST('u', 10, 16, 0), 0), IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X, 
IIO_CHAN_INFO_SCALE_SHARED_BIT | IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, accel_x, ADIS16240_SCAN_ACC_X, IIO_ST('s', 10, 16, 0), 0), IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y, IIO_CHAN_INFO_SCALE_SHARED_BIT | IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, accel_y, ADIS16240_SCAN_ACC_Y, IIO_ST('s', 10, 16, 0), 0), IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z, IIO_CHAN_INFO_SCALE_SHARED_BIT | IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, accel_z, ADIS16240_SCAN_ACC_Z, IIO_ST('s', 10, 16, 0), 0), IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0, IIO_CHAN_INFO_SCALE_SEPARATE_BIT, temp, ADIS16240_SCAN_TEMP, IIO_ST('u', 10, 16, 0), 0), IIO_CHAN_SOFT_TIMESTAMP(6) }; static struct attribute *adis16240_attributes[] = { &iio_dev_attr_in_accel_xyz_squared_peak_raw.dev_attr.attr, &iio_const_attr_sampling_frequency_available.dev_attr.attr, &iio_dev_attr_reset.dev_attr.attr, NULL }; static const struct attribute_group adis16240_attribute_group = { .attrs = adis16240_attributes, }; static const struct iio_info adis16240_info = { .attrs = &adis16240_attribute_group, .read_raw = &adis16240_read_raw, .write_raw = &adis16240_write_raw, .driver_module = THIS_MODULE, }; static int __devinit adis16240_probe(struct spi_device *spi) { int ret; struct adis16240_state *st; struct iio_dev *indio_dev; /* setup the industrialio driver allocated elements */ indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(indio_dev); /* this is only used for removal purposes */ spi_set_drvdata(spi, indio_dev); st->us = spi; mutex_init(&st->buf_lock); indio_dev->name = spi->dev.driver->name; indio_dev->dev.parent = &spi->dev; indio_dev->info = &adis16240_info; indio_dev->channels = adis16240_channels; indio_dev->num_channels = ARRAY_SIZE(adis16240_channels); indio_dev->modes = INDIO_DIRECT_MODE; ret = adis16240_configure_ring(indio_dev); if (ret) goto error_free_dev; ret = iio_buffer_register(indio_dev, adis16240_channels, ARRAY_SIZE(adis16240_channels)); if (ret) 
{ printk(KERN_ERR "failed to initialize the ring\n"); goto error_unreg_ring_funcs; } if (spi->irq) { ret = adis16240_probe_trigger(indio_dev); if (ret) goto error_uninitialize_ring; } /* Get the device into a sane initial state */ ret = adis16240_initial_setup(indio_dev); if (ret) goto error_remove_trigger; ret = iio_device_register(indio_dev); if (ret) goto error_remove_trigger; return 0; error_remove_trigger: adis16240_remove_trigger(indio_dev); error_uninitialize_ring: iio_buffer_unregister(indio_dev); error_unreg_ring_funcs: adis16240_unconfigure_ring(indio_dev); error_free_dev: iio_free_device(indio_dev); error_ret: return ret; } static int adis16240_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); flush_scheduled_work(); iio_device_unregister(indio_dev); adis16240_remove_trigger(indio_dev); iio_buffer_unregister(indio_dev); adis16240_unconfigure_ring(indio_dev); iio_free_device(indio_dev); return 0; } static struct spi_driver adis16240_driver = { .driver = { .name = "adis16240", .owner = THIS_MODULE, }, .probe = adis16240_probe, .remove = __devexit_p(adis16240_remove), }; module_spi_driver(adis16240_driver); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:adis16240");
gpl-2.0
evil-god/runbo-q5x6
kernel/drivers/net/wireless/iwmc3200wifi/main.c
4964
21442
/* * Intel Wireless Multicomm 3200 WiFi driver * * Copyright (C) 2009 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * Intel Corporation <ilw@linux.intel.com> * Samuel Ortiz <samuel.ortiz@intel.com> * Zhu Yi <yi.zhu@intel.com> * */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/sched.h> #include <linux/ieee80211.h> #include <linux/wireless.h> #include <linux/slab.h> #include <linux/moduleparam.h> #include "iwm.h" #include "debug.h" #include "bus.h" #include "umac.h" #include "commands.h" #include "hal.h" #include "fw.h" #include "rx.h" static struct iwm_conf def_iwm_conf = { .sdio_ior_timeout = 5000, .calib_map = BIT(CALIB_CFG_DC_IDX) | BIT(CALIB_CFG_LO_IDX) | BIT(CALIB_CFG_TX_IQ_IDX) | BIT(CALIB_CFG_RX_IQ_IDX) | BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD), .expected_calib_map = BIT(PHY_CALIBRATE_DC_CMD) | BIT(PHY_CALIBRATE_LO_CMD) | BIT(PHY_CALIBRATE_TX_IQ_CMD) | BIT(PHY_CALIBRATE_RX_IQ_CMD) | BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD), .ct_kill_entry = 110, .ct_kill_exit = 110, .reset_on_fatal_err = 1, .auto_connect = 1, .enable_qos = 1, .mode = UMAC_MODE_BSS, /* UMAC configuration */ .power_index = 0, .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD, .cts_to_self = 0, .assoc_timeout = 2, .roam_timeout = 10, .wireless_mode = WIRELESS_MODE_11A | WIRELESS_MODE_11G | WIRELESS_MODE_11N, /* IBSS */ .ibss_band = UMAC_BAND_2GHZ, .ibss_channel = 1, .mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03}, }; static bool modparam_reset; module_param_named(reset, modparam_reset, bool, 0644); MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])"); static bool modparam_wimax_enable = true; module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644); MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])"); int iwm_mode_to_nl80211_iftype(int mode) { switch (mode) { case UMAC_MODE_BSS: return NL80211_IFTYPE_STATION; case UMAC_MODE_IBSS: return NL80211_IFTYPE_ADHOC; default: return NL80211_IFTYPE_UNSPECIFIED; } return 0; } static void iwm_statistics_request(struct work_struct *work) { 
struct iwm_priv *iwm = container_of(work, struct iwm_priv, stats_request.work); iwm_send_umac_stats_req(iwm, 0); } static void iwm_disconnect_work(struct work_struct *work) { struct iwm_priv *iwm = container_of(work, struct iwm_priv, disconnect.work); if (iwm->umac_profile_active) iwm_invalidate_mlme_profile(iwm); clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); iwm->umac_profile_active = false; memset(iwm->bssid, 0, ETH_ALEN); iwm->channel = 0; iwm_link_off(iwm); wake_up_interruptible(&iwm->mlme_queue); cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL); } static void iwm_ct_kill_work(struct work_struct *work) { struct iwm_priv *iwm = container_of(work, struct iwm_priv, ct_kill_delay.work); struct wiphy *wiphy = iwm_to_wiphy(iwm); IWM_INFO(iwm, "CT kill delay timeout\n"); wiphy_rfkill_set_hw_state(wiphy, false); } static int __iwm_up(struct iwm_priv *iwm); static int __iwm_down(struct iwm_priv *iwm); static void iwm_reset_worker(struct work_struct *work) { struct iwm_priv *iwm; struct iwm_umac_profile *profile = NULL; int uninitialized_var(ret), retry = 0; iwm = container_of(work, struct iwm_priv, reset_worker); /* * XXX: The iwm->mutex is introduced purely for this reset work, * because the other users for iwm_up and iwm_down are only netdev * ndo_open and ndo_stop which are already protected by rtnl. * Please remove iwm->mutex together if iwm_reset_worker() is not * required in the future. */ if (!mutex_trylock(&iwm->mutex)) { IWM_WARN(iwm, "We are in the middle of interface bringing " "UP/DOWN. 
Skip driver resetting.\n"); return; } if (iwm->umac_profile_active) { profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL); if (profile) memcpy(profile, iwm->umac_profile, sizeof(*profile)); else IWM_ERR(iwm, "Couldn't alloc memory for profile\n"); } __iwm_down(iwm); while (retry++ < 3) { ret = __iwm_up(iwm); if (!ret) break; schedule_timeout_uninterruptible(10 * HZ); } if (ret) { IWM_WARN(iwm, "iwm_up() failed: %d\n", ret); kfree(profile); goto out; } if (profile) { IWM_DBG_MLME(iwm, DBG, "Resend UMAC profile\n"); memcpy(iwm->umac_profile, profile, sizeof(*profile)); iwm_send_mlme_profile(iwm); kfree(profile); } else clear_bit(IWM_STATUS_RESETTING, &iwm->status); out: mutex_unlock(&iwm->mutex); } static void iwm_auth_retry_worker(struct work_struct *work) { struct iwm_priv *iwm; int i, ret; iwm = container_of(work, struct iwm_priv, auth_retry_worker); if (iwm->umac_profile_active) { ret = iwm_invalidate_mlme_profile(iwm); if (ret < 0) return; } iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK; ret = iwm_send_mlme_profile(iwm); if (ret < 0) return; for (i = 0; i < IWM_NUM_KEYS; i++) if (iwm->keys[i].key_len) iwm_set_key(iwm, 0, &iwm->keys[i]); iwm_set_tx_key(iwm, iwm->default_key); } static void iwm_watchdog(unsigned long data) { struct iwm_priv *iwm = (struct iwm_priv *)data; IWM_WARN(iwm, "Watchdog expired: UMAC stalls!\n"); if (modparam_reset) iwm_resetting(iwm); } int iwm_priv_init(struct iwm_priv *iwm) { int i, j; char name[32]; iwm->status = 0; INIT_LIST_HEAD(&iwm->pending_notif); init_waitqueue_head(&iwm->notif_queue); init_waitqueue_head(&iwm->nonwifi_queue); init_waitqueue_head(&iwm->wifi_ntfy_queue); init_waitqueue_head(&iwm->mlme_queue); memcpy(&iwm->conf, &def_iwm_conf, sizeof(struct iwm_conf)); spin_lock_init(&iwm->tx_credit.lock); INIT_LIST_HEAD(&iwm->wifi_pending_cmd); INIT_LIST_HEAD(&iwm->nonwifi_pending_cmd); iwm->wifi_seq_num = UMAC_WIFI_SEQ_NUM_BASE; iwm->nonwifi_seq_num = UMAC_NONWIFI_SEQ_NUM_BASE; 
spin_lock_init(&iwm->cmd_lock); iwm->scan_id = 1; INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request); INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work); INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work); INIT_WORK(&iwm->reset_worker, iwm_reset_worker); INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker); INIT_LIST_HEAD(&iwm->bss_list); skb_queue_head_init(&iwm->rx_list); INIT_LIST_HEAD(&iwm->rx_tickets); spin_lock_init(&iwm->ticket_lock); for (i = 0; i < IWM_RX_ID_HASH; i++) { INIT_LIST_HEAD(&iwm->rx_packets[i]); spin_lock_init(&iwm->packet_lock[i]); } INIT_WORK(&iwm->rx_worker, iwm_rx_worker); iwm->rx_wq = create_singlethread_workqueue(KBUILD_MODNAME "_rx"); if (!iwm->rx_wq) return -EAGAIN; for (i = 0; i < IWM_TX_QUEUES; i++) { INIT_WORK(&iwm->txq[i].worker, iwm_tx_worker); snprintf(name, 32, KBUILD_MODNAME "_tx_%d", i); iwm->txq[i].id = i; iwm->txq[i].wq = create_singlethread_workqueue(name); if (!iwm->txq[i].wq) return -EAGAIN; skb_queue_head_init(&iwm->txq[i].queue); skb_queue_head_init(&iwm->txq[i].stopped_queue); spin_lock_init(&iwm->txq[i].lock); } for (i = 0; i < IWM_NUM_KEYS; i++) memset(&iwm->keys[i], 0, sizeof(struct iwm_key)); iwm->default_key = -1; for (i = 0; i < IWM_STA_TABLE_NUM; i++) for (j = 0; j < IWM_UMAC_TID_NR; j++) { mutex_init(&iwm->sta_table[i].tid_info[j].mutex); iwm->sta_table[i].tid_info[j].stopped = false; } init_timer(&iwm->watchdog); iwm->watchdog.function = iwm_watchdog; iwm->watchdog.data = (unsigned long)iwm; mutex_init(&iwm->mutex); iwm->last_fw_err = kzalloc(sizeof(struct iwm_fw_error_hdr), GFP_KERNEL); if (iwm->last_fw_err == NULL) return -ENOMEM; return 0; } void iwm_priv_deinit(struct iwm_priv *iwm) { int i; for (i = 0; i < IWM_TX_QUEUES; i++) destroy_workqueue(iwm->txq[i].wq); destroy_workqueue(iwm->rx_wq); kfree(iwm->last_fw_err); } /* * We reset all the structures, and we reset the UMAC. * After calling this routine, you're expected to reload * the firmware. 
*/ void iwm_reset(struct iwm_priv *iwm) { struct iwm_notif *notif, *next; if (test_bit(IWM_STATUS_READY, &iwm->status)) iwm_target_reset(iwm); if (test_bit(IWM_STATUS_RESETTING, &iwm->status)) { iwm->status = 0; set_bit(IWM_STATUS_RESETTING, &iwm->status); } else iwm->status = 0; iwm->scan_id = 1; list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) { list_del(&notif->pending); kfree(notif->buf); kfree(notif); } iwm_cmd_flush(iwm); flush_workqueue(iwm->rx_wq); iwm_link_off(iwm); } void iwm_resetting(struct iwm_priv *iwm) { set_bit(IWM_STATUS_RESETTING, &iwm->status); schedule_work(&iwm->reset_worker); } /* * Notification code: * * We're faced with the following issue: Any host command can * have an answer or not, and if there's an answer to expect, * it can be treated synchronously or asynchronously. * To work around the synchronous answer case, we implemented * our notification mechanism. * When a code path needs to wait for a command response * synchronously, it calls notif_handle(), which waits for the * right notification to show up, and then process it. Before * starting to wait, it registered as a waiter for this specific * answer (by toggling a bit in on of the handler_map), so that * the rx code knows that it needs to send a notification to the * waiting processes. It does so by calling iwm_notif_send(), * which adds the notification to the pending notifications list, * and then wakes the waiting processes up. 
*/ int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd, u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size) { struct iwm_notif *notif; notif = kzalloc(sizeof(struct iwm_notif), GFP_KERNEL); if (!notif) { IWM_ERR(iwm, "Couldn't alloc memory for notification\n"); return -ENOMEM; } INIT_LIST_HEAD(&notif->pending); notif->cmd = cmd; notif->cmd_id = cmd_id; notif->src = source; notif->buf = kzalloc(buf_size, GFP_KERNEL); if (!notif->buf) { IWM_ERR(iwm, "Couldn't alloc notification buffer\n"); kfree(notif); return -ENOMEM; } notif->buf_size = buf_size; memcpy(notif->buf, buf, buf_size); list_add_tail(&notif->pending, &iwm->pending_notif); wake_up_interruptible(&iwm->notif_queue); return 0; } static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd, u8 source) { struct iwm_notif *notif; list_for_each_entry(notif, &iwm->pending_notif, pending) { if ((notif->cmd_id == cmd) && (notif->src == source)) { list_del(&notif->pending); return notif; } } return NULL; } static struct iwm_notif *iwm_notif_wait(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout) { int ret; struct iwm_notif *notif; unsigned long *map = NULL; switch (source) { case IWM_SRC_LMAC: map = &iwm->lmac_handler_map[0]; break; case IWM_SRC_UMAC: map = &iwm->umac_handler_map[0]; break; case IWM_SRC_UDMA: map = &iwm->udma_handler_map[0]; break; } set_bit(cmd, map); ret = wait_event_interruptible_timeout(iwm->notif_queue, ((notif = iwm_notif_find(iwm, cmd, source)) != NULL), timeout); clear_bit(cmd, map); if (!ret) return NULL; return notif; } int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout) { int ret; struct iwm_notif *notif; notif = iwm_notif_wait(iwm, cmd, source, timeout); if (!notif) return -ETIME; ret = iwm_rx_handle_resp(iwm, notif->buf, notif->buf_size, notif->cmd); kfree(notif->buf); kfree(notif); return ret; } static int iwm_config_boot_params(struct iwm_priv *iwm) { struct iwm_udma_nonwifi_cmd target_cmd; int ret; /* check Wimax is off and 
config debug monitor */ if (!modparam_wimax_enable) { u32 data1 = 0x1f; u32 addr1 = 0x606BE258; u32 data2_set = 0x0; u32 data2_clr = 0x1; u32 addr2 = 0x606BE100; u32 data3 = 0x1; u32 addr3 = 0x606BEC00; target_cmd.resp = 0; target_cmd.handle_by_hw = 0; target_cmd.eop = 1; target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE; target_cmd.addr = cpu_to_le32(addr1); target_cmd.op1_sz = cpu_to_le32(sizeof(u32)); target_cmd.op2 = 0; ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1); if (ret < 0) { IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n"); return ret; } target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE; target_cmd.addr = cpu_to_le32(addr2); target_cmd.op1_sz = cpu_to_le32(data2_set); target_cmd.op2 = cpu_to_le32(data2_clr); ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1); if (ret < 0) { IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n"); return ret; } target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE; target_cmd.addr = cpu_to_le32(addr3); target_cmd.op1_sz = cpu_to_le32(sizeof(u32)); target_cmd.op2 = 0; ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data3); if (ret < 0) { IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n"); return ret; } } return 0; } void iwm_init_default_profile(struct iwm_priv *iwm, struct iwm_umac_profile *profile) { memset(profile, 0, sizeof(struct iwm_umac_profile)); profile->sec.auth_type = UMAC_AUTH_TYPE_OPEN; profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE; profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_NONE; profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_NONE; if (iwm->conf.enable_qos) profile->flags |= cpu_to_le16(UMAC_PROFILE_QOS_ALLOWED); profile->wireless_mode = iwm->conf.wireless_mode; profile->mode = cpu_to_le32(iwm->conf.mode); profile->ibss.atim = 0; profile->ibss.beacon_interval = 100; profile->ibss.join_only = 0; profile->ibss.band = iwm->conf.ibss_band; profile->ibss.channel = iwm->conf.ibss_channel; } void iwm_link_on(struct iwm_priv *iwm) { netif_carrier_on(iwm_to_ndev(iwm)); netif_tx_wake_all_queues(iwm_to_ndev(iwm)); 
iwm_send_umac_stats_req(iwm, 0); } void iwm_link_off(struct iwm_priv *iwm) { struct iw_statistics *wstats = &iwm->wstats; int i; netif_tx_stop_all_queues(iwm_to_ndev(iwm)); netif_carrier_off(iwm_to_ndev(iwm)); for (i = 0; i < IWM_TX_QUEUES; i++) { skb_queue_purge(&iwm->txq[i].queue); skb_queue_purge(&iwm->txq[i].stopped_queue); iwm->txq[i].concat_count = 0; iwm->txq[i].concat_ptr = iwm->txq[i].concat_buf; flush_workqueue(iwm->txq[i].wq); } iwm_rx_free(iwm); cancel_delayed_work_sync(&iwm->stats_request); memset(wstats, 0, sizeof(struct iw_statistics)); wstats->qual.updated = IW_QUAL_ALL_INVALID; kfree(iwm->req_ie); iwm->req_ie = NULL; iwm->req_ie_len = 0; kfree(iwm->resp_ie); iwm->resp_ie = NULL; iwm->resp_ie_len = 0; del_timer_sync(&iwm->watchdog); } static void iwm_bss_list_clean(struct iwm_priv *iwm) { struct iwm_bss_info *bss, *next; list_for_each_entry_safe(bss, next, &iwm->bss_list, node) { list_del(&bss->node); kfree(bss->bss); kfree(bss); } } static int iwm_channels_init(struct iwm_priv *iwm) { int ret; ret = iwm_send_umac_channel_list(iwm); if (ret) { IWM_ERR(iwm, "Send channel list failed\n"); return ret; } ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST, IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT); if (ret) { IWM_ERR(iwm, "Didn't get a channel list notification\n"); return ret; } return 0; } static int __iwm_up(struct iwm_priv *iwm) { int ret; struct iwm_notif *notif_reboot, *notif_ack = NULL; struct wiphy *wiphy = iwm_to_wiphy(iwm); u32 wireless_mode; ret = iwm_bus_enable(iwm); if (ret) { IWM_ERR(iwm, "Couldn't enable function\n"); return ret; } iwm_rx_setup_handlers(iwm); /* Wait for initial BARKER_REBOOT from hardware */ notif_reboot = iwm_notif_wait(iwm, IWM_BARKER_REBOOT_NOTIFICATION, IWM_SRC_UDMA, 2 * HZ); if (!notif_reboot) { IWM_ERR(iwm, "Wait for REBOOT_BARKER timeout\n"); goto err_disable; } /* We send the barker back */ ret = iwm_bus_send_chunk(iwm, notif_reboot->buf, 16); if (ret) { IWM_ERR(iwm, "REBOOT barker response failed\n"); 
kfree(notif_reboot); goto err_disable; } kfree(notif_reboot->buf); kfree(notif_reboot); /* Wait for ACK_BARKER from hardware */ notif_ack = iwm_notif_wait(iwm, IWM_ACK_BARKER_NOTIFICATION, IWM_SRC_UDMA, 2 * HZ); if (!notif_ack) { IWM_ERR(iwm, "Wait for ACK_BARKER timeout\n"); goto err_disable; } kfree(notif_ack->buf); kfree(notif_ack); /* We start to config static boot parameters */ ret = iwm_config_boot_params(iwm); if (ret) { IWM_ERR(iwm, "Config boot parameters failed\n"); goto err_disable; } ret = iwm_read_mac(iwm, iwm_to_ndev(iwm)->dev_addr); if (ret) { IWM_ERR(iwm, "MAC reading failed\n"); goto err_disable; } memcpy(iwm_to_ndev(iwm)->perm_addr, iwm_to_ndev(iwm)->dev_addr, ETH_ALEN); /* We can load the FWs */ ret = iwm_load_fw(iwm); if (ret) { IWM_ERR(iwm, "FW loading failed\n"); goto err_disable; } ret = iwm_eeprom_fat_channels(iwm); if (ret) { IWM_ERR(iwm, "Couldnt read HT channels EEPROM entries\n"); goto err_fw; } /* * Read our SKU capabilities. * If it's valid, we AND the configured wireless mode with the * device EEPROM value as the current profile wireless mode. 
*/ wireless_mode = iwm_eeprom_wireless_mode(iwm); if (wireless_mode) { iwm->conf.wireless_mode &= wireless_mode; if (iwm->umac_profile) iwm->umac_profile->wireless_mode = iwm->conf.wireless_mode; } else IWM_ERR(iwm, "Wrong SKU capabilities: 0x%x\n", *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP))); snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s", iwm->lmac_version, iwm->umac_version); /* We configure the UMAC and enable the wifi module */ ret = iwm_send_umac_config(iwm, cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) | cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_LINK_EN) | cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_MLME_EN)); if (ret) { IWM_ERR(iwm, "UMAC config failed\n"); goto err_fw; } ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS, IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT); if (ret) { IWM_ERR(iwm, "Didn't get a wifi core status notification\n"); goto err_fw; } if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN | UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) { IWM_DBG_BOOT(iwm, DBG, "Not all cores enabled:0x%x\n", iwm->core_enabled); ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS, IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT); if (ret) { IWM_ERR(iwm, "Didn't get a core status notification\n"); goto err_fw; } if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN | UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) { IWM_ERR(iwm, "Not all cores enabled: 0x%x\n", iwm->core_enabled); goto err_fw; } else { IWM_INFO(iwm, "All cores enabled\n"); } } ret = iwm_channels_init(iwm); if (ret < 0) { IWM_ERR(iwm, "Couldn't init channels\n"); goto err_fw; } /* Set the READY bit to indicate interface is brought up successfully */ set_bit(IWM_STATUS_READY, &iwm->status); return 0; err_fw: iwm_eeprom_exit(iwm); err_disable: ret = iwm_bus_disable(iwm); if (ret < 0) IWM_ERR(iwm, "Couldn't disable function\n"); return -EIO; } int iwm_up(struct iwm_priv *iwm) { int ret; mutex_lock(&iwm->mutex); ret = __iwm_up(iwm); mutex_unlock(&iwm->mutex); return ret; } static int 
__iwm_down(struct iwm_priv *iwm) { int ret; /* The interface is already down */ if (!test_bit(IWM_STATUS_READY, &iwm->status)) return 0; if (iwm->scan_request) { cfg80211_scan_done(iwm->scan_request, true); iwm->scan_request = NULL; } clear_bit(IWM_STATUS_READY, &iwm->status); iwm_eeprom_exit(iwm); iwm_bss_list_clean(iwm); iwm_init_default_profile(iwm, iwm->umac_profile); iwm->umac_profile_active = false; iwm->default_key = -1; iwm->core_enabled = 0; ret = iwm_bus_disable(iwm); if (ret < 0) { IWM_ERR(iwm, "Couldn't disable function\n"); return ret; } return 0; } int iwm_down(struct iwm_priv *iwm) { int ret; mutex_lock(&iwm->mutex); ret = __iwm_down(iwm); mutex_unlock(&iwm->mutex); return ret; }
gpl-2.0
showliu/android_kernel_xiaomi_aries-1
drivers/staging/iio/accel/adis16203_core.c
4964
12419
/*
 * ADIS16203 Programmable Digital Vibration Sensor driver
 *
 * Copyright 2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/module.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../buffer.h"

#include "adis16203.h"

#define DRIVER_NAME		"adis16203"

/**
 * adis16203_spi_write_reg_8() - write single byte to a register
 * @indio_dev: iio device associated with child of actual device
 * @reg_address: the address of the register to be written
 * @val: the value to write
 *
 * Serialized against other register accesses via st->buf_lock.
 **/
static int adis16203_spi_write_reg_8(struct iio_dev *indio_dev,
				     u8 reg_address,
				     u8 val)
{
	int ret;
	struct adis16203_state *st = iio_priv(indio_dev);

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16203_WRITE_REG(reg_address);
	st->tx[1] = val;

	ret = spi_write(st->us, st->tx, 2);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/**
 * adis16203_spi_write_reg_16() - write 2 bytes to a pair of registers
 * @indio_dev: iio device associated with child of actual device
 * @lower_reg_address: the address of the lower of the two registers.
 *	Second register is assumed to have address one greater.
 * @value: value to be written
 *
 * The device takes one byte per register address, so the 16-bit value
 * is split across two chip-select-separated transfers.
 **/
static int adis16203_spi_write_reg_16(struct iio_dev *indio_dev,
				      u8 lower_reg_address,
				      u16 value)
{
	int ret;
	struct spi_message msg;
	struct adis16203_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
		}, {
			.tx_buf = st->tx + 2,
			.bits_per_word = 8,
			.len = 2,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16203_WRITE_REG(lower_reg_address);
	st->tx[1] = value & 0xFF;
	st->tx[2] = ADIS16203_WRITE_REG(lower_reg_address + 1);
	st->tx[3] = (value >> 8) & 0xFF;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/**
 * adis16203_spi_read_reg_16() - read 2 bytes from a 16-bit register
 * @indio_dev: iio device associated with child of actual device
 * @lower_reg_address: the address of the lower of the two registers.
 *	Second register is assumed to have address one greater.
 * @val: somewhere to pass back the value read
 *
 * Uses a write-address transfer followed by a read transfer, with
 * inter-transfer delays the part requires (see datasheet stall time).
 **/
static int adis16203_spi_read_reg_16(struct iio_dev *indio_dev,
				     u8 lower_reg_address,
				     u16 *val)
{
	struct spi_message msg;
	struct adis16203_state *st = iio_priv(indio_dev);
	int ret;
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 20,
		}, {
			.rx_buf = st->rx,
			.bits_per_word = 8,
			.len = 2,
			.delay_usecs = 20,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16203_READ_REG(lower_reg_address);
	st->tx[1] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		dev_err(&st->us->dev,
			"problem when reading 16 bit register 0x%02X",
			lower_reg_address);
		goto error_ret;
	}
	*val = (st->rx[0] << 8) | st->rx[1];

error_ret:
	mutex_unlock(&st->buf_lock);
	return ret;
}

/*
 * Read DIAG_STAT and report any error conditions.
 * Returns a negative errno on communication failure, otherwise the
 * low 5 error bits of the status register (0 means "no error").
 */
static int adis16203_check_status(struct iio_dev *indio_dev)
{
	u16 status;
	int ret;

	ret = adis16203_spi_read_reg_16(indio_dev,
					ADIS16203_DIAG_STAT,
					&status);
	if (ret < 0) {
		dev_err(&indio_dev->dev, "Reading status failed\n");
		goto error_ret;
	}
	ret = status & 0x1F;

	if (status & ADIS16203_DIAG_STAT_SELFTEST_FAIL)
		dev_err(&indio_dev->dev, "Self test failure\n");
	if (status & ADIS16203_DIAG_STAT_SPI_FAIL)
		dev_err(&indio_dev->dev, "SPI failure\n");
	if (status & ADIS16203_DIAG_STAT_FLASH_UPT)
		dev_err(&indio_dev->dev, "Flash update failed\n");
	if (status & ADIS16203_DIAG_STAT_POWER_HIGH)
		dev_err(&indio_dev->dev, "Power supply above 3.625V\n");
	if (status & ADIS16203_DIAG_STAT_POWER_LOW)
		dev_err(&indio_dev->dev, "Power supply below 3.15V\n");

error_ret:
	return ret;
}

/* Issue a software reset via the GLOB_CMD register. */
static int adis16203_reset(struct iio_dev *indio_dev)
{
	int ret;

	ret = adis16203_spi_write_reg_8(indio_dev,
					ADIS16203_GLOB_CMD,
					ADIS16203_GLOB_CMD_SW_RESET);
	if (ret)
		dev_err(&indio_dev->dev, "problem resetting device");

	return ret;
}

/* sysfs "reset" attribute: writing '1', 'y' or 'Y' resets the device. */
static ssize_t adis16203_write_reset(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	if (len < 1)
		return -EINVAL;
	switch (buf[0]) {
	case '1':
	case 'y':
	case 'Y':
		return adis16203_reset(indio_dev);
	}
	return -EINVAL;
}

/*
 * Enable or disable the data-ready interrupt on DIO1.
 * Always configures active-high polarity and routes data-ready to DIO1.
 */
int adis16203_set_irq(struct iio_dev *indio_dev, bool enable)
{
	int ret = 0;
	u16 msc;

	ret = adis16203_spi_read_reg_16(indio_dev, ADIS16203_MSC_CTRL, &msc);
	if (ret)
		goto error_ret;

	msc |= ADIS16203_MSC_CTRL_ACTIVE_HIGH;
	msc &= ~ADIS16203_MSC_CTRL_DATA_RDY_DIO1;
	if (enable)
		msc |= ADIS16203_MSC_CTRL_DATA_RDY_EN;
	else
		msc &= ~ADIS16203_MSC_CTRL_DATA_RDY_EN;

	ret = adis16203_spi_write_reg_16(indio_dev, ADIS16203_MSC_CTRL, msc);

error_ret:
	return ret;
}

/*
 * Run the device self test and report the outcome.
 *
 * Fix vs. previous version: the result of adis16203_check_status() was
 * discarded, so a failed self-test was reported as success.  Propagate
 * it so callers (adis16203_initial_setup) see the failure.
 */
static int adis16203_self_test(struct iio_dev *indio_dev)
{
	int ret;

	ret = adis16203_spi_write_reg_16(indio_dev,
					 ADIS16203_MSC_CTRL,
					 ADIS16203_MSC_CTRL_SELF_TEST_EN);
	if (ret) {
		dev_err(&indio_dev->dev, "problem starting self test");
		goto err_ret;
	}

	ret = adis16203_check_status(indio_dev);

err_ret:
	return ret;
}

/* Bring the device to a sane initial state: IRQ off, self-test passed. */
static int adis16203_initial_setup(struct iio_dev *indio_dev)
{
	int ret;

	/* Disable IRQ */
	ret = adis16203_set_irq(indio_dev, false);
	if (ret) {
		dev_err(&indio_dev->dev, "disable irq failed");
		goto err_ret;
	}

	/* Do self test */
	ret = adis16203_self_test(indio_dev);
	if (ret) {
		dev_err(&indio_dev->dev, "self test failure");
		goto err_ret;
	}

	/* Read status register to check the result */
	ret = adis16203_check_status(indio_dev);
	if (ret) {
		adis16203_reset(indio_dev);
		dev_err(&indio_dev->dev, "device not playing ball -> reset");
		msleep(ADIS16203_STARTUP_DELAY);
		ret = adis16203_check_status(indio_dev);
		if (ret) {
			dev_err(&indio_dev->dev, "giving up");
			goto err_ret;
		}
	}

err_ret:
	return ret;
}

enum adis16203_chan {
	in_supply,
	in_aux,
	incli_x,
	incli_y,
	temp,
};

/*
 * Per-channel register addresses: [0] is the data output register,
 * [1] (where present) is the calibration/offset register.
 */
static u8 adis16203_addresses[5][2] = {
	[in_supply] = { ADIS16203_SUPPLY_OUT },
	[in_aux] = { ADIS16203_AUX_ADC },
	[incli_x] = { ADIS16203_XINCL_OUT, ADIS16203_INCL_NULL},
	[incli_y] = { ADIS16203_YINCL_OUT },
	[temp] = { ADIS16203_TEMP_OUT }
};

static int adis16203_write_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int val,
			       int val2,
			       long mask)
{
	/* currently only one writable parameter which keeps this simple */
	u8 addr = adis16203_addresses[chan->address][1];

	return adis16203_spi_write_reg_16(indio_dev, addr, val & 0x3FFF);
}

static int adis16203_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int *val, int *val2,
			      long mask)
{
	int ret;
	int bits;
	u8 addr;
	u16 val16;	/* was s16: register reads want a u16 pointer */

	switch (mask) {
	case 0:
		mutex_lock(&indio_dev->mlock);
		addr = adis16203_addresses[chan->address][0];
		ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}

		if (val16 & ADIS16203_ERROR_ACTIVE) {
			ret = adis16203_check_status(indio_dev);
			if (ret) {
				mutex_unlock(&indio_dev->mlock);
				return ret;
			}
		}
		val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
		if (chan->scan_type.sign == 's')
			/* sign-extend from realbits to 16 bits */
			*val = (s16)(val16 << (16 - chan->scan_type.realbits))
					>> (16 - chan->scan_type.realbits);
		else
			*val = val16;
		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			*val = 0;
			if (chan->channel == 0)
				*val2 = 1220;	/* supply: 1.22 mV/LSB */
			else
				*val2 = 610;	/* aux ADC: 0.61 mV/LSB */
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_TEMP:
			*val = 0;
			*val2 = -470000;	/* -0.47 C/LSB */
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_INCLI:
			*val = 0;
			*val2 = 25000;		/* 0.025 degree/LSB */
			return IIO_VAL_INT_PLUS_MICRO;
		default:
			return -EINVAL;
		}
	case IIO_CHAN_INFO_OFFSET:
		*val = 25;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_CALIBBIAS:
		bits = 14;
		mutex_lock(&indio_dev->mlock);
		addr = adis16203_addresses[chan->address][1];
		ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}
		val16 &= (1 << bits) - 1;
		/* sign-extend the 14-bit calibration value */
		*val = (s16)(val16 << (16 - bits)) >> (16 - bits);
		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}

static struct iio_chan_spec adis16203_channels[] = {
	IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
		 in_supply, ADIS16203_SCAN_SUPPLY,
		 IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
		 in_aux, ADIS16203_SCAN_AUX_ADC,
		 IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
		 IIO_CHAN_INFO_SCALE_SHARED_BIT |
		 IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
		 incli_x, ADIS16203_SCAN_INCLI_X,
		 IIO_ST('s', 14, 16, 0), 0),
	/* Fixme: Not what it appears to be - see data sheet */
	IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
		 IIO_CHAN_INFO_SCALE_SHARED_BIT,
		 incli_y, ADIS16203_SCAN_INCLI_Y,
		 IIO_ST('s', 14, 16, 0), 0),
	IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
		 IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
		 temp, ADIS16203_SCAN_TEMP,
		 IIO_ST('u', 12, 16, 0), 0),
	IIO_CHAN_SOFT_TIMESTAMP(5),
};

static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16203_write_reset, 0);

static struct attribute *adis16203_attributes[] = {
	&iio_dev_attr_reset.dev_attr.attr,
	NULL
};

static const struct attribute_group adis16203_attribute_group = {
	.attrs = adis16203_attributes,
};

static const struct iio_info adis16203_info = {
	.attrs = &adis16203_attribute_group,
	.read_raw = &adis16203_read_raw,
	.write_raw = &adis16203_write_raw,
	.driver_module = THIS_MODULE,
};

static int __devinit adis16203_probe(struct spi_device *spi)
{
	int ret;
	struct iio_dev *indio_dev;
	struct adis16203_state *st;

	/* setup the industrialio driver allocated elements */
	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	st = iio_priv(indio_dev);
	/* this is only used for removal purposes */
	spi_set_drvdata(spi, indio_dev);
	st->us = spi;
	mutex_init(&st->buf_lock);

	indio_dev->name = spi->dev.driver->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->channels = adis16203_channels;
	indio_dev->num_channels = ARRAY_SIZE(adis16203_channels);
	indio_dev->info = &adis16203_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = adis16203_configure_ring(indio_dev);
	if (ret)
		goto error_free_dev;

	ret = iio_buffer_register(indio_dev,
				  adis16203_channels,
				  ARRAY_SIZE(adis16203_channels));
	if (ret) {
		printk(KERN_ERR "failed to initialize the ring\n");
		goto error_unreg_ring_funcs;
	}

	if (spi->irq) {
		ret = adis16203_probe_trigger(indio_dev);
		if (ret)
			goto error_uninitialize_ring;
	}

	/* Get the device into a sane initial state */
	ret = adis16203_initial_setup(indio_dev);
	if (ret)
		goto error_remove_trigger;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_remove_trigger;
	return 0;

error_remove_trigger:
	adis16203_remove_trigger(indio_dev);
error_uninitialize_ring:
	iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
	adis16203_unconfigure_ring(indio_dev);
error_free_dev:
	iio_free_device(indio_dev);
error_ret:
	return ret;
}

static int adis16203_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);

	iio_device_unregister(indio_dev);
	adis16203_remove_trigger(indio_dev);
	iio_buffer_unregister(indio_dev);
	adis16203_unconfigure_ring(indio_dev);
	iio_free_device(indio_dev);

	return 0;
}

static struct spi_driver adis16203_driver = {
	.driver = {
		.name = "adis16203",
		.owner = THIS_MODULE,
	},
	.probe = adis16203_probe,
	.remove = __devexit_p(adis16203_remove),
};
module_spi_driver(adis16203_driver);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:adis16203");
gpl-2.0
THCue/android_kernel_lge_v410
arch/powerpc/lib/locks.c
6756
2225
/* * Spin and read/write lock operations. * * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM * Rework to support virtual processors * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/export.h> #include <linux/stringify.h> #include <linux/smp.h> /* waiting for a spinlock... */ #if defined(CONFIG_PPC_SPLPAR) #include <asm/hvcall.h> #include <asm/smp.h> void __spin_yield(arch_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; lock_value = lock->slock; if (lock_value == 0) return; holder_cpu = lock_value & 0xffff; BUG_ON(holder_cpu >= NR_CPUS); yield_count = lppaca_of(holder_cpu).yield_count; if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); if (lock->slock != lock_value) return; /* something has changed */ plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), yield_count); } /* * Waiting for a read lock or a write lock on a rwlock... * This turns out to be the same for read and write locks, since * we only know the holder if it is write-locked. 
*/ void __rw_yield(arch_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; lock_value = rw->lock; if (lock_value >= 0) return; /* no write lock at present */ holder_cpu = lock_value & 0xffff; BUG_ON(holder_cpu >= NR_CPUS); yield_count = lppaca_of(holder_cpu).yield_count; if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); if (rw->lock != lock_value) return; /* something has changed */ plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), yield_count); } #endif void arch_spin_unlock_wait(arch_spinlock_t *lock) { while (lock->slock) { HMT_low(); if (SHARED_PROCESSOR) __spin_yield(lock); } HMT_medium(); } EXPORT_SYMBOL(arch_spin_unlock_wait);
gpl-2.0
EloYGomeZ/caf-j1-exp
drivers/net/wireless/wl1251/ps.c
8292
4284
/*
 * This file is part of wl1251
 *
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include "reg.h"
#include "ps.h"
#include "cmd.h"
#include "io.h"

/* in ms */
#define WL1251_WAKEUP_TIMEOUT 100

/*
 * Delayed work: put the chip into ELP (extremely low power) unless it
 * is already there or the station has gone back to active mode since
 * the work was queued.
 */
void wl1251_elp_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1251 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1251, elp_work);

	wl1251_debug(DEBUG_PSM, "elp work");

	mutex_lock(&wl->mutex);

	if (wl->elp || wl->station_mode == STATION_ACTIVE_MODE)
		goto out;

	wl1251_debug(DEBUG_PSM, "chip to elp");
	wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
	wl->elp = true;

out:
	mutex_unlock(&wl->mutex);
}

#define ELP_ENTRY_DELAY  5

/* Routines to toggle sleep mode while in ELP */

/*
 * Schedule entry into ELP after a short delay, but only when the
 * station is in a power-save mode.
 */
void wl1251_ps_elp_sleep(struct wl1251 *wl)
{
	unsigned long delay;

	if (wl->station_mode != STATION_ACTIVE_MODE) {
		delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
		ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
	}
}

/*
 * Wake the chip from ELP, polling the ELP control register until the
 * WLAN_READY bit is set or WL1251_WAKEUP_TIMEOUT ms have elapsed.
 *
 * Returns 0 on success (or if the chip was already awake), -ETIMEDOUT
 * if the chip does not signal readiness in time.
 */
int wl1251_ps_elp_wakeup(struct wl1251 *wl)
{
	unsigned long timeout, start;
	u32 elp_reg;

	/*
	 * Fix vs. previous version: the delayed_work_pending() guard
	 * around this call was racy (the work could fire between the
	 * check and the cancel) and unnecessary -- cancel_delayed_work()
	 * is safe to call whether or not the work is pending.
	 */
	cancel_delayed_work(&wl->elp_work);

	if (!wl->elp)
		return 0;

	wl1251_debug(DEBUG_PSM, "waking up chip from elp");

	start = jiffies;
	timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);

	wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);

	elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);

	/*
	 * FIXME: we should wait for irq from chip but, as a temporary
	 * solution to simplify locking, let's poll instead
	 */
	while (!(elp_reg & ELPCTRL_WLAN_READY)) {
		if (time_after(jiffies, timeout)) {
			wl1251_error("elp wakeup timeout");
			return -ETIMEDOUT;
		}
		msleep(1);
		elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
	}

	wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start));

	wl->elp = false;

	return 0;
}

/*
 * Switch the station power-save mode.  Each transition programs beacon
 * filtering, wake-up conditions, BET and the firmware PS mode in the
 * order the firmware expects; any failing step aborts the transition.
 *
 * Returns 0 on success or a negative error code from the failing
 * ACX/command call.
 */
int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_station_mode mode)
{
	int ret;

	switch (mode) {
	case STATION_POWER_SAVE_MODE:
		wl1251_debug(DEBUG_PSM, "entering psm");

		/* enable beacon filtering */
		ret = wl1251_acx_beacon_filter_opt(wl, true);
		if (ret < 0)
			return ret;

		ret = wl1251_acx_wake_up_conditions(wl,
						    WAKE_UP_EVENT_DTIM_BITMAP,
						    wl->listen_int);
		if (ret < 0)
			return ret;

		ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
					    WL1251_DEFAULT_BET_CONSECUTIVE);
		if (ret < 0)
			return ret;

		ret = wl1251_cmd_ps_mode(wl, CHIP_POWER_SAVE_MODE);
		if (ret < 0)
			return ret;

		ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
		if (ret < 0)
			return ret;
		break;
	case STATION_IDLE:
		wl1251_debug(DEBUG_PSM, "entering idle");

		ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
		if (ret < 0)
			return ret;

		ret = wl1251_cmd_template_set(wl, CMD_DISCONNECT, NULL, 0);
		if (ret < 0)
			return ret;
		break;
	case STATION_ACTIVE_MODE:
	default:
		wl1251_debug(DEBUG_PSM, "leaving psm");

		ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
		if (ret < 0)
			return ret;

		/* disable BET */
		ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
					    WL1251_DEFAULT_BET_CONSECUTIVE);
		if (ret < 0)
			return ret;

		/* disable beacon filtering */
		ret = wl1251_acx_beacon_filter_opt(wl, false);
		if (ret < 0)
			return ret;

		ret = wl1251_acx_wake_up_conditions(wl,
						    WAKE_UP_EVENT_DTIM_BITMAP,
						    wl->listen_int);
		if (ret < 0)
			return ret;

		ret = wl1251_cmd_ps_mode(wl, CHIP_ACTIVE_MODE);
		if (ret < 0)
			return ret;
		break;
	}
	wl->station_mode = mode;

	return ret;
}
gpl-2.0
talnoah/Lemur_UpdatedBase
Documentation/prctl/disable-tsc-ctxt-sw-stress-test.c
12900
1724
/*
 * Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
 *
 * Tests if the control register is updated correctly
 * at context switches
 *
 * Warning: this test will cause a very high load for a few seconds
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <wait.h>

#include <sys/prctl.h>
#include <linux/prctl.h>

/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE		1   /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV		2   /* throw a SIGSEGV instead of reading the TSC */
#endif

/* Read the CPU timestamp counter (x86/x86_64 only). */
uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	/* We cannot use "=A", since this would use %rax on x86_64 */
	__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
	return (uint64_t)hi << 32 | lo;
}

/* SIGSEGV is the expected outcome when TSC access is disabled. */
void sigsegv_expect(int sig)
{
	/* */
}

/*
 * Child body: disable TSC access, then execute rdtsc.  The rdtsc must
 * fault; falling through to the fprintf means the control register was
 * not updated, which is a test failure.
 */
void segvtask(void)
{
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0)
	{
		perror("prctl");
		/* fix: error paths used exit(0), masking failures */
		exit(EXIT_FAILURE);
	}
	signal(SIGSEGV, sigsegv_expect);
	alarm(10);
	rdtsc();
	fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
	exit(EXIT_FAILURE);
}

/* SIGSEGV while TSC access is enabled means the test failed. */
void sigsegv_fail(int sig)
{
	fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
	exit(EXIT_FAILURE);
}

/*
 * Child body: enable TSC access and spin on rdtsc.  Any SIGSEGV while
 * spinning (e.g. the disable leaking across a context switch into this
 * task) is a failure; the alarm(10) bounds the run time.
 */
void rdtsctask(void)
{
	if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0)
	{
		perror("prctl");
		exit(EXIT_FAILURE);
	}
	signal(SIGSEGV, sigsegv_fail);
	alarm(10);
	for(;;) rdtsc();
}

/*
 * Fork a mix of TSC-disabled and TSC-enabled children and let them
 * context-switch against each other; each child self-reports failure.
 */
int main(int argc, char **argv)
{
	int n_tasks = 100, i;

	fprintf(stderr, "[No further output means we're allright]\n");

	for (i=0; i<n_tasks; i++) {
		pid_t pid = fork();

		if (pid < 0) {
			/* fix: fork() failure was previously unchecked */
			perror("fork");
			exit(EXIT_FAILURE);
		}
		if (pid == 0) {
			if (i & 1)
				segvtask();
			else
				rdtsctask();
		}
	}

	for (i=0; i<n_tasks; i++)
		wait(NULL);

	exit(EXIT_SUCCESS);
}
gpl-2.0
adwaitnd/Kronux
drivers/acpi/sleep.c
613
21432
/* * sleep.c - ACPI sleep support. * * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com> * Copyright (c) 2000-2003 Patrick Mochel * Copyright (c) 2003 Open Source Development Lab * * This file is released under the GPLv2. * */ #include <linux/delay.h> #include <linux/irq.h> #include <linux/dmi.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/suspend.h> #include <linux/reboot.h> #include <linux/acpi.h> #include <linux/module.h> #include <asm/io.h> #include <trace/events/power.h> #include "internal.h" #include "sleep.h" static u8 sleep_states[ACPI_S_STATE_COUNT]; static void acpi_sleep_tts_switch(u32 acpi_state) { acpi_status status; status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state); if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { /* * OS can't evaluate the _TTS object correctly. Some warning * message will be printed. But it won't break anything. */ printk(KERN_NOTICE "Failure in evaluating _TTS object\n"); } } static int tts_notify_reboot(struct notifier_block *this, unsigned long code, void *x) { acpi_sleep_tts_switch(ACPI_STATE_S5); return NOTIFY_DONE; } static struct notifier_block tts_notifier = { .notifier_call = tts_notify_reboot, .next = NULL, .priority = 0, }; static int acpi_sleep_prepare(u32 acpi_state) { #ifdef CONFIG_ACPI_SLEEP /* do we have a wakeup address for S2 and S3? 
*/ if (acpi_state == ACPI_STATE_S3) { if (!acpi_wakeup_address) return -EFAULT; acpi_set_firmware_waking_vector(acpi_wakeup_address); } ACPI_FLUSH_CPU_CACHE(); #endif printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n", acpi_state); acpi_enable_wakeup_devices(acpi_state); acpi_enter_sleep_state_prep(acpi_state); return 0; } static bool acpi_sleep_state_supported(u8 sleep_state) { acpi_status status; u8 type_a, type_b; status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b); return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware || (acpi_gbl_FADT.sleep_control.address && acpi_gbl_FADT.sleep_status.address)); } #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; u32 acpi_target_system_state(void) { return acpi_target_sleep_state; } EXPORT_SYMBOL_GPL(acpi_target_system_state); static bool pwr_btn_event_pending; /* * The ACPI specification wants us to save NVS memory regions during hibernation * and to restore them during the subsequent resume. Windows does that also for * suspend to RAM. However, it is known that this mechanism does not work on * all machines, so we allow the user to disable it with the help of the * 'acpi_sleep=nonvs' kernel command line option. */ static bool nvs_nosave; void __init acpi_nvs_nosave(void) { nvs_nosave = true; } /* * The ACPI specification wants us to save NVS memory regions during hibernation * but says nothing about saving NVS during S3. Not all versions of Windows * save NVS on S3 suspend either, and it is clear that not all systems need * NVS to be saved at S3 time. To improve suspend/resume time, allow the * user to disable saving NVS on S3 if their system does not require it, but * continue to save/restore NVS for S4 as specified. 
*/ static bool nvs_nosave_s3; void __init acpi_nvs_nosave_s3(void) { nvs_nosave_s3 = true; } /* * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the * user to request that behavior by using the 'acpi_old_suspend_ordering' * kernel command line option that causes the following variable to be set. */ static bool old_suspend_ordering; void __init acpi_old_suspend_ordering(void) { old_suspend_ordering = true; } static int __init init_old_suspend_ordering(const struct dmi_system_id *d) { acpi_old_suspend_ordering(); return 0; } static int __init init_nvs_nosave(const struct dmi_system_id *d) { acpi_nvs_nosave(); return 0; } static struct dmi_system_id acpisleep_dmi_table[] __initdata = { { .callback = init_old_suspend_ordering, .ident = "Abit KN9 (nForce4 variant)", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"), DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"), }, }, { .callback = init_old_suspend_ordering, .ident = "HP xw4600 Workstation", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "M2N8L"), }, }, { .callback = init_old_suspend_ordering, .ident = "Panasonic CF51-2L", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Matsushita Electric Industrial Co.,Ltd."), DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW41E_H", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW21E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW21M", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB17FX", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-SR11M", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), }, }, { .callback = init_nvs_nosave, .ident = "Everex StepNote Series", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB1Z1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-NW130D", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCCW29FX", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), }, }, { .callback = init_nvs_nosave, .ident = "Averatec AV1020-ED2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus A8N-SLI DELUXE", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), }, }, { .callback = init_old_suspend_ordering, .ident = "Asus A8N-SLI Premium", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VGN-SR26GN_P", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"), }, }, { .callback = init_nvs_nosave, .ident = "Sony Vaio VPCEB1S1E", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"), }, }, { 
.callback = init_nvs_nosave, .ident = "Sony Vaio VGN-FW520F", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"), }, }, { .callback = init_nvs_nosave, .ident = "Asus K54C", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "K54C"), }, }, { .callback = init_nvs_nosave, .ident = "Asus K54HR", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"), }, }, {}, }; static void __init acpi_sleep_dmi_check(void) { int year; if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012) acpi_nvs_nosave_s3(); dmi_check_system(acpisleep_dmi_table); } /** * acpi_pm_freeze - Disable the GPEs and suspend EC transactions. */ static int acpi_pm_freeze(void) { acpi_disable_all_gpes(); acpi_os_wait_events_complete(); acpi_ec_block_transactions(); return 0; } /** * acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS. */ static int acpi_pm_pre_suspend(void) { acpi_pm_freeze(); return suspend_nvs_save(); } /** * __acpi_pm_prepare - Prepare the platform to enter the target state. * * If necessary, set the firmware waking vector and do arch-specific * nastiness to get the wakeup code to the waking vector. */ static int __acpi_pm_prepare(void) { int error = acpi_sleep_prepare(acpi_target_sleep_state); if (error) acpi_target_sleep_state = ACPI_STATE_S0; return error; } /** * acpi_pm_prepare - Prepare the platform to enter the target sleep * state and disable the GPEs. */ static int acpi_pm_prepare(void) { int error = __acpi_pm_prepare(); if (!error) error = acpi_pm_pre_suspend(); return error; } static int find_powerf_dev(struct device *dev, void *data) { struct acpi_device *device = to_acpi_device(dev); const char *hid = acpi_device_hid(device); return !strcmp(hid, ACPI_BUTTON_HID_POWERF); } /** * acpi_pm_finish - Instruct the platform to leave a sleep state. 
* * This is called after we wake back up (or if entering the sleep state * failed). */ static void acpi_pm_finish(void) { struct device *pwr_btn_dev; u32 acpi_state = acpi_target_sleep_state; acpi_ec_unblock_transactions(); suspend_nvs_free(); if (acpi_state == ACPI_STATE_S0) return; printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n", acpi_state); acpi_disable_wakeup_devices(acpi_state); acpi_leave_sleep_state(acpi_state); /* reset firmware waking vector */ acpi_set_firmware_waking_vector((acpi_physical_address) 0); acpi_target_sleep_state = ACPI_STATE_S0; acpi_resume_power_resources(); /* If we were woken with the fixed power button, provide a small * hint to userspace in the form of a wakeup event on the fixed power * button device (if it can be found). * * We delay the event generation til now, as the PM layer requires * timekeeping to be running before we generate events. */ if (!pwr_btn_event_pending) return; pwr_btn_event_pending = false; pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL, find_powerf_dev); if (pwr_btn_dev) { pm_wakeup_event(pwr_btn_dev, 0); put_device(pwr_btn_dev); } } /** * acpi_pm_start - Start system PM transition. */ static void acpi_pm_start(u32 acpi_state) { acpi_target_sleep_state = acpi_state; acpi_sleep_tts_switch(acpi_target_sleep_state); acpi_scan_lock_acquire(); } /** * acpi_pm_end - Finish up system PM transition. */ static void acpi_pm_end(void) { acpi_scan_lock_release(); /* * This is necessary in case acpi_pm_finish() is not called during a * failing transition to a sleep state. 
*/ acpi_target_sleep_state = ACPI_STATE_S0; acpi_sleep_tts_switch(acpi_target_sleep_state); } #else /* !CONFIG_ACPI_SLEEP */ #define acpi_target_sleep_state ACPI_STATE_S0 static inline void acpi_sleep_dmi_check(void) {} #endif /* CONFIG_ACPI_SLEEP */ #ifdef CONFIG_SUSPEND static u32 acpi_suspend_states[] = { [PM_SUSPEND_ON] = ACPI_STATE_S0, [PM_SUSPEND_STANDBY] = ACPI_STATE_S1, [PM_SUSPEND_MEM] = ACPI_STATE_S3, [PM_SUSPEND_MAX] = ACPI_STATE_S5 }; /** * acpi_suspend_begin - Set the target system sleep state to the state * associated with given @pm_state, if supported. */ static int acpi_suspend_begin(suspend_state_t pm_state) { u32 acpi_state = acpi_suspend_states[pm_state]; int error; error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc(); if (error) return error; if (!sleep_states[acpi_state]) { pr_err("ACPI does not support sleep state S%u\n", acpi_state); return -ENOSYS; } acpi_pm_start(acpi_state); return 0; } /** * acpi_suspend_enter - Actually enter a sleep state. * @pm_state: ignored * * Flush caches and go to sleep. For STR we have to call arch-specific * assembly, which in turn call acpi_enter_sleep_state(). * It's unfortunate, but it works. Please fix if you're feeling frisky. */ static int acpi_suspend_enter(suspend_state_t pm_state) { acpi_status status = AE_OK; u32 acpi_state = acpi_target_sleep_state; int error; ACPI_FLUSH_CPU_CACHE(); trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true); switch (acpi_state) { case ACPI_STATE_S1: barrier(); status = acpi_enter_sleep_state(acpi_state); break; case ACPI_STATE_S3: if (!acpi_suspend_lowlevel) return -ENOSYS; error = acpi_suspend_lowlevel(); if (error) return error; pr_info(PREFIX "Low-level resume complete\n"); break; } trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false); /* This violates the spec but is required for bug compatibility. 
*/ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); /* Reprogram control registers */ acpi_leave_sleep_state_prep(acpi_state); /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the * POWER_BUTTON event should not reach userspace ] * * However, we do generate a small hint for userspace in the form of * a wakeup event. We flag this condition for now and generate the * event later, as we're currently too early in resume to be able to * generate wakeup events. */ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) { acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED; acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status); if (pwr_btn_status & ACPI_EVENT_FLAG_SET) { acpi_clear_event(ACPI_EVENT_POWER_BUTTON); /* Flag for later */ pwr_btn_event_pending = true; } } /* * Disable and clear GPE status before interrupt is enabled. Some GPEs * (like wakeup GPE) haven't handler, this can avoid such GPE misfire. * acpi_leave_sleep_state will reenable specific GPEs later */ acpi_disable_all_gpes(); /* Allow EC transactions to happen. */ acpi_ec_unblock_transactions_early(); suspend_nvs_restore(); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } static int acpi_suspend_state_valid(suspend_state_t pm_state) { u32 acpi_state; switch (pm_state) { case PM_SUSPEND_ON: case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: acpi_state = acpi_suspend_states[pm_state]; return sleep_states[acpi_state]; default: return 0; } } static const struct platform_suspend_ops acpi_suspend_ops = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin, .prepare_late = acpi_pm_prepare, .enter = acpi_suspend_enter, .wake = acpi_pm_finish, .end = acpi_pm_end, }; /** * acpi_suspend_begin_old - Set the target system sleep state to the * state associated with given @pm_state, if supported, and * execute the _PTS control method. This function is used if the * pre-ACPI 2.0 suspend ordering has been requested. 
*/ static int acpi_suspend_begin_old(suspend_state_t pm_state) { int error = acpi_suspend_begin(pm_state); if (!error) error = __acpi_pm_prepare(); return error; } /* * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. */ static const struct platform_suspend_ops acpi_suspend_ops_old = { .valid = acpi_suspend_state_valid, .begin = acpi_suspend_begin_old, .prepare_late = acpi_pm_pre_suspend, .enter = acpi_suspend_enter, .wake = acpi_pm_finish, .end = acpi_pm_end, .recover = acpi_pm_finish, }; static int acpi_freeze_begin(void) { acpi_scan_lock_acquire(); return 0; } static int acpi_freeze_prepare(void) { acpi_enable_wakeup_devices(ACPI_STATE_S0); acpi_enable_all_wakeup_gpes(); acpi_os_wait_events_complete(); enable_irq_wake(acpi_gbl_FADT.sci_interrupt); return 0; } static void acpi_freeze_restore(void) { acpi_disable_wakeup_devices(ACPI_STATE_S0); disable_irq_wake(acpi_gbl_FADT.sci_interrupt); acpi_enable_all_runtime_gpes(); } static void acpi_freeze_end(void) { acpi_scan_lock_release(); } static const struct platform_freeze_ops acpi_freeze_ops = { .begin = acpi_freeze_begin, .prepare = acpi_freeze_prepare, .restore = acpi_freeze_restore, .end = acpi_freeze_end, }; static void acpi_sleep_suspend_setup(void) { int i; for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) if (acpi_sleep_state_supported(i)) sleep_states[i] = 1; suspend_set_ops(old_suspend_ordering ? &acpi_suspend_ops_old : &acpi_suspend_ops); freeze_set_ops(&acpi_freeze_ops); } #else /* !CONFIG_SUSPEND */ static inline void acpi_sleep_suspend_setup(void) {} #endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATION static unsigned long s4_hardware_signature; static struct acpi_table_facs *facs; static bool nosigcheck; void __init acpi_no_s4_hw_signature(void) { nosigcheck = true; } static int acpi_hibernation_begin(void) { int error; error = nvs_nosave ? 
0 : suspend_nvs_alloc(); if (!error) acpi_pm_start(ACPI_STATE_S4); return error; } static int acpi_hibernation_enter(void) { acpi_status status = AE_OK; ACPI_FLUSH_CPU_CACHE(); /* This shouldn't return. If it returns, we have a problem */ status = acpi_enter_sleep_state(ACPI_STATE_S4); /* Reprogram control registers */ acpi_leave_sleep_state_prep(ACPI_STATE_S4); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } static void acpi_hibernation_leave(void) { /* * If ACPI is not enabled by the BIOS and the boot kernel, we need to * enable it here. */ acpi_enable(); /* Reprogram control registers */ acpi_leave_sleep_state_prep(ACPI_STATE_S4); /* Check the hardware signature */ if (facs && s4_hardware_signature != facs->hardware_signature) pr_crit("ACPI: Hardware changed while hibernated, success doubtful!\n"); /* Restore the NVS memory area */ suspend_nvs_restore(); /* Allow EC transactions to happen. */ acpi_ec_unblock_transactions_early(); } static void acpi_pm_thaw(void) { acpi_ec_unblock_transactions(); acpi_enable_all_runtime_gpes(); } static const struct platform_hibernation_ops acpi_hibernation_ops = { .begin = acpi_hibernation_begin, .end = acpi_pm_end, .pre_snapshot = acpi_pm_prepare, .finish = acpi_pm_finish, .prepare = acpi_pm_prepare, .enter = acpi_hibernation_enter, .leave = acpi_hibernation_leave, .pre_restore = acpi_pm_freeze, .restore_cleanup = acpi_pm_thaw, }; /** * acpi_hibernation_begin_old - Set the target system sleep state to * ACPI_STATE_S4 and execute the _PTS control method. This * function is used if the pre-ACPI 2.0 suspend ordering has been * requested. */ static int acpi_hibernation_begin_old(void) { int error; /* * The _TTS object should always be evaluated before the _PTS object. * When the old_suspended_ordering is true, the _PTS object is * evaluated in the acpi_sleep_prepare. 
*/ acpi_sleep_tts_switch(ACPI_STATE_S4); error = acpi_sleep_prepare(ACPI_STATE_S4); if (!error) { if (!nvs_nosave) error = suspend_nvs_alloc(); if (!error) { acpi_target_sleep_state = ACPI_STATE_S4; acpi_scan_lock_acquire(); } } return error; } /* * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has * been requested. */ static const struct platform_hibernation_ops acpi_hibernation_ops_old = { .begin = acpi_hibernation_begin_old, .end = acpi_pm_end, .pre_snapshot = acpi_pm_pre_suspend, .prepare = acpi_pm_freeze, .finish = acpi_pm_finish, .enter = acpi_hibernation_enter, .leave = acpi_hibernation_leave, .pre_restore = acpi_pm_freeze, .restore_cleanup = acpi_pm_thaw, .recover = acpi_pm_finish, }; static void acpi_sleep_hibernate_setup(void) { if (!acpi_sleep_state_supported(ACPI_STATE_S4)) return; hibernation_set_ops(old_suspend_ordering ? &acpi_hibernation_ops_old : &acpi_hibernation_ops); sleep_states[ACPI_STATE_S4] = 1; if (nosigcheck) return; acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs); if (facs) s4_hardware_signature = facs->hardware_signature; } #else /* !CONFIG_HIBERNATION */ static inline void acpi_sleep_hibernate_setup(void) {} #endif /* !CONFIG_HIBERNATION */ static void acpi_power_off_prepare(void) { /* Prepare to power off the system */ acpi_sleep_prepare(ACPI_STATE_S5); acpi_disable_all_gpes(); acpi_os_wait_events_complete(); } static void acpi_power_off(void) { /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); acpi_enter_sleep_state(ACPI_STATE_S5); } int __init acpi_sleep_init(void) { char supported[ACPI_S_STATE_COUNT * 3 + 1]; char *pos = supported; int i; acpi_sleep_dmi_check(); sleep_states[ACPI_STATE_S0] = 1; acpi_sleep_suspend_setup(); acpi_sleep_hibernate_setup(); if (acpi_sleep_state_supported(ACPI_STATE_S5)) { sleep_states[ACPI_STATE_S5] = 1; pm_power_off_prepare = acpi_power_off_prepare; pm_power_off = 
acpi_power_off; } supported[0] = 0; for (i = 0; i < ACPI_S_STATE_COUNT; i++) { if (sleep_states[i]) pos += sprintf(pos, " S%d", i); } pr_info(PREFIX "(supports%s)\n", supported); /* * Register the tts_notifier to reboot notifier list so that the _TTS * object can also be evaluated when the system enters S5. */ register_reboot_notifier(&tts_notifier); return 0; }
gpl-2.0
project-voodoo/android_kernel_samsung
drivers/media/dvb/frontends/dib3000mb.c
869
23971
/* * Frontend driver for mobile DVB-T demodulator DiBcom 3000M-B * DiBcom (http://www.dibcom.fr/) * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) * * based on GPL code from DibCom, which has * * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * * Acknowledgements * * Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver * sources, on which this driver (and the dvb-dibusb) are based. * * see Documentation/dvb/README.dibusb for more information * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "dib3000.h" #include "dib3000mb_priv.h" /* Version information */ #define DRIVER_VERSION "0.1" #define DRIVER_DESC "DiBcom 3000M-B DVB-T demodulator" #define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@desy.de" #ifdef CONFIG_DVB_DIBCOM_DEBUG static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-able))."); #endif #define deb_info(args...) dprintk(0x01,args) #define deb_i2c(args...) dprintk(0x02,args) #define deb_srch(args...) dprintk(0x04,args) #define deb_info(args...) dprintk(0x01,args) #define deb_xfer(args...) dprintk(0x02,args) #define deb_setf(args...) dprintk(0x04,args) #define deb_getf(args...) 
dprintk(0x08,args) #ifdef CONFIG_DVB_DIBCOM_DEBUG static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,2=i2c,4=srch (|-able))."); #endif static int dib3000_read_reg(struct dib3000_state *state, u16 reg) { u8 wb[] = { ((reg >> 8) | 0x80) & 0xff, reg & 0xff }; u8 rb[2]; struct i2c_msg msg[] = { { .addr = state->config.demod_address, .flags = 0, .buf = wb, .len = 2 }, { .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 }, }; if (i2c_transfer(state->i2c, msg, 2) != 2) deb_i2c("i2c read error\n"); deb_i2c("reading i2c bus (reg: %5d 0x%04x, val: %5d 0x%04x)\n",reg,reg, (rb[0] << 8) | rb[1],(rb[0] << 8) | rb[1]); return (rb[0] << 8) | rb[1]; } static int dib3000_write_reg(struct dib3000_state *state, u16 reg, u16 val) { u8 b[] = { (reg >> 8) & 0xff, reg & 0xff, (val >> 8) & 0xff, val & 0xff, }; struct i2c_msg msg[] = { { .addr = state->config.demod_address, .flags = 0, .buf = b, .len = 4 } }; deb_i2c("writing i2c bus (reg: %5d 0x%04x, val: %5d 0x%04x)\n",reg,reg,val,val); return i2c_transfer(state->i2c,msg, 1) != 1 ? 
-EREMOTEIO : 0; } static int dib3000_search_status(u16 irq,u16 lock) { if (irq & 0x02) { if (lock & 0x01) { deb_srch("auto search succeeded\n"); return 1; // auto search succeeded } else { deb_srch("auto search not successful\n"); return 0; // auto search failed } } else if (irq & 0x01) { deb_srch("auto search failed\n"); return 0; // auto search failed } return -1; // try again } /* for auto search */ static u16 dib3000_seq[2][2][2] = /* fft,gua, inv */ { /* fft */ { /* gua */ { 0, 1 }, /* 0 0 { 0,1 } */ { 3, 9 }, /* 0 1 { 0,1 } */ }, { { 2, 5 }, /* 1 0 { 0,1 } */ { 6, 11 }, /* 1 1 { 0,1 } */ } }; static int dib3000mb_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep); static int dib3000mb_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep, int tuner) { struct dib3000_state* state = fe->demodulator_priv; struct dvb_ofdm_parameters *ofdm = &fep->u.ofdm; fe_code_rate_t fe_cr = FEC_NONE; int search_state, seq; if (tuner && fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, fep); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); deb_setf("bandwidth: "); switch (ofdm->bandwidth) { case BANDWIDTH_8_MHZ: deb_setf("8 MHz\n"); wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[2]); wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_8mhz); break; case BANDWIDTH_7_MHZ: deb_setf("7 MHz\n"); wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[1]); wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_7mhz); break; case BANDWIDTH_6_MHZ: deb_setf("6 MHz\n"); wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[0]); wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_6mhz); break; case BANDWIDTH_AUTO: return -EOPNOTSUPP; default: err("unknown bandwidth value."); return -EINVAL; } } wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4); deb_setf("transmission mode: "); switch (ofdm->transmission_mode) { case TRANSMISSION_MODE_2K: deb_setf("2k\n"); wr(DIB3000MB_REG_FFT, 
DIB3000_TRANSMISSION_MODE_2K); break; case TRANSMISSION_MODE_8K: deb_setf("8k\n"); wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K); break; case TRANSMISSION_MODE_AUTO: deb_setf("auto\n"); break; default: return -EINVAL; } deb_setf("guard: "); switch (ofdm->guard_interval) { case GUARD_INTERVAL_1_32: deb_setf("1_32\n"); wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32); break; case GUARD_INTERVAL_1_16: deb_setf("1_16\n"); wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16); break; case GUARD_INTERVAL_1_8: deb_setf("1_8\n"); wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8); break; case GUARD_INTERVAL_1_4: deb_setf("1_4\n"); wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4); break; case GUARD_INTERVAL_AUTO: deb_setf("auto\n"); break; default: return -EINVAL; } deb_setf("inversion: "); switch (fep->inversion) { case INVERSION_OFF: deb_setf("off\n"); wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF); break; case INVERSION_AUTO: deb_setf("auto "); break; case INVERSION_ON: deb_setf("on\n"); wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON); break; default: return -EINVAL; } deb_setf("constellation: "); switch (ofdm->constellation) { case QPSK: deb_setf("qpsk\n"); wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK); break; case QAM_16: deb_setf("qam16\n"); wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_16QAM); break; case QAM_64: deb_setf("qam64\n"); wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_64QAM); break; case QAM_AUTO: break; default: return -EINVAL; } deb_setf("hierarchy: "); switch (ofdm->hierarchy_information) { case HIERARCHY_NONE: deb_setf("none "); /* fall through */ case HIERARCHY_1: deb_setf("alpha=1\n"); wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1); break; case HIERARCHY_2: deb_setf("alpha=2\n"); wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_2); break; case HIERARCHY_4: deb_setf("alpha=4\n"); wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_4); break; case HIERARCHY_AUTO: deb_setf("alpha=auto\n"); break; default: return -EINVAL; } deb_setf("hierarchy: 
"); if (ofdm->hierarchy_information == HIERARCHY_NONE) { deb_setf("none\n"); wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_OFF); wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_HP); fe_cr = ofdm->code_rate_HP; } else if (ofdm->hierarchy_information != HIERARCHY_AUTO) { deb_setf("on\n"); wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_ON); wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_LP); fe_cr = ofdm->code_rate_LP; } deb_setf("fec: "); switch (fe_cr) { case FEC_1_2: deb_setf("1_2\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_1_2); break; case FEC_2_3: deb_setf("2_3\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_2_3); break; case FEC_3_4: deb_setf("3_4\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_3_4); break; case FEC_5_6: deb_setf("5_6\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_5_6); break; case FEC_7_8: deb_setf("7_8\n"); wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_7_8); break; case FEC_NONE: deb_setf("none "); break; case FEC_AUTO: deb_setf("auto\n"); break; default: return -EINVAL; } seq = dib3000_seq [ofdm->transmission_mode == TRANSMISSION_MODE_AUTO] [ofdm->guard_interval == GUARD_INTERVAL_AUTO] [fep->inversion == INVERSION_AUTO]; deb_setf("seq? %d\n", seq); wr(DIB3000MB_REG_SEQ, seq); wr(DIB3000MB_REG_ISI, seq ? 
DIB3000MB_ISI_INHIBIT : DIB3000MB_ISI_ACTIVATE); if (ofdm->transmission_mode == TRANSMISSION_MODE_2K) { if (ofdm->guard_interval == GUARD_INTERVAL_1_8) { wr(DIB3000MB_REG_SYNC_IMPROVEMENT, DIB3000MB_SYNC_IMPROVE_2K_1_8); } else { wr(DIB3000MB_REG_SYNC_IMPROVEMENT, DIB3000MB_SYNC_IMPROVE_DEFAULT); } wr(DIB3000MB_REG_UNK_121, DIB3000MB_UNK_121_2K); } else { wr(DIB3000MB_REG_UNK_121, DIB3000MB_UNK_121_DEFAULT); } wr(DIB3000MB_REG_MOBILE_ALGO, DIB3000MB_MOBILE_ALGO_OFF); wr(DIB3000MB_REG_MOBILE_MODE_QAM, DIB3000MB_MOBILE_MODE_QAM_OFF); wr(DIB3000MB_REG_MOBILE_MODE, DIB3000MB_MOBILE_MODE_OFF); wr_foreach(dib3000mb_reg_agc_bandwidth, dib3000mb_agc_bandwidth_high); wr(DIB3000MB_REG_ISI, DIB3000MB_ISI_ACTIVATE); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_AGC + DIB3000MB_RESTART_CTRL); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_OFF); /* wait for AGC lock */ msleep(70); wr_foreach(dib3000mb_reg_agc_bandwidth, dib3000mb_agc_bandwidth_low); /* something has to be auto searched */ if (ofdm->constellation == QAM_AUTO || ofdm->hierarchy_information == HIERARCHY_AUTO || fe_cr == FEC_AUTO || fep->inversion == INVERSION_AUTO) { int as_count=0; deb_setf("autosearch enabled.\n"); wr(DIB3000MB_REG_ISI, DIB3000MB_ISI_INHIBIT); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_AUTO_SEARCH); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_OFF); while ((search_state = dib3000_search_status( rd(DIB3000MB_REG_AS_IRQ_PENDING), rd(DIB3000MB_REG_LOCK2_VALUE))) < 0 && as_count++ < 100) msleep(1); deb_setf("search_state after autosearch %d after %d checks\n",search_state,as_count); if (search_state == 1) { struct dvb_frontend_parameters feps; if (dib3000mb_get_frontend(fe, &feps) == 0) { deb_setf("reading tuning data from frontend succeeded.\n"); return dib3000mb_set_frontend(fe, &feps, 0); } } } else { wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_CTRL); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_OFF); } return 0; } static int dib3000mb_fe_init(struct dvb_frontend* fe, int mobile_mode) { struct 
dib3000_state* state = fe->demodulator_priv; deb_info("dib3000mb is getting up.\n"); wr(DIB3000MB_REG_POWER_CONTROL, DIB3000MB_POWER_UP); wr(DIB3000MB_REG_RESTART, DIB3000MB_RESTART_AGC); wr(DIB3000MB_REG_RESET_DEVICE, DIB3000MB_RESET_DEVICE); wr(DIB3000MB_REG_RESET_DEVICE, DIB3000MB_RESET_DEVICE_RST); wr(DIB3000MB_REG_CLOCK, DIB3000MB_CLOCK_DEFAULT); wr(DIB3000MB_REG_ELECT_OUT_MODE, DIB3000MB_ELECT_OUT_MODE_ON); wr(DIB3000MB_REG_DDS_FREQ_MSB, DIB3000MB_DDS_FREQ_MSB); wr(DIB3000MB_REG_DDS_FREQ_LSB, DIB3000MB_DDS_FREQ_LSB); wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[2]); wr_foreach(dib3000mb_reg_impulse_noise, dib3000mb_impulse_noise_values[DIB3000MB_IMPNOISE_OFF]); wr_foreach(dib3000mb_reg_agc_gain, dib3000mb_default_agc_gain); wr(DIB3000MB_REG_PHASE_NOISE, DIB3000MB_PHASE_NOISE_DEFAULT); wr_foreach(dib3000mb_reg_phase_noise, dib3000mb_default_noise_phase); wr_foreach(dib3000mb_reg_lock_duration, dib3000mb_default_lock_duration); wr_foreach(dib3000mb_reg_agc_bandwidth, dib3000mb_agc_bandwidth_low); wr(DIB3000MB_REG_LOCK0_MASK, DIB3000MB_LOCK0_DEFAULT); wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4); wr(DIB3000MB_REG_LOCK2_MASK, DIB3000MB_LOCK2_DEFAULT); wr(DIB3000MB_REG_SEQ, dib3000_seq[1][1][1]); wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_8mhz); wr(DIB3000MB_REG_UNK_68, DIB3000MB_UNK_68); wr(DIB3000MB_REG_UNK_69, DIB3000MB_UNK_69); wr(DIB3000MB_REG_UNK_71, DIB3000MB_UNK_71); wr(DIB3000MB_REG_UNK_77, DIB3000MB_UNK_77); wr(DIB3000MB_REG_UNK_78, DIB3000MB_UNK_78); wr(DIB3000MB_REG_ISI, DIB3000MB_ISI_INHIBIT); wr(DIB3000MB_REG_UNK_92, DIB3000MB_UNK_92); wr(DIB3000MB_REG_UNK_96, DIB3000MB_UNK_96); wr(DIB3000MB_REG_UNK_97, DIB3000MB_UNK_97); wr(DIB3000MB_REG_UNK_106, DIB3000MB_UNK_106); wr(DIB3000MB_REG_UNK_107, DIB3000MB_UNK_107); wr(DIB3000MB_REG_UNK_108, DIB3000MB_UNK_108); wr(DIB3000MB_REG_UNK_122, DIB3000MB_UNK_122); wr(DIB3000MB_REG_MOBILE_MODE_QAM, DIB3000MB_MOBILE_MODE_QAM_OFF); wr(DIB3000MB_REG_BERLEN, 
DIB3000MB_BERLEN_DEFAULT); wr_foreach(dib3000mb_reg_filter_coeffs, dib3000mb_filter_coeffs); wr(DIB3000MB_REG_MOBILE_ALGO, DIB3000MB_MOBILE_ALGO_ON); wr(DIB3000MB_REG_MULTI_DEMOD_MSB, DIB3000MB_MULTI_DEMOD_MSB); wr(DIB3000MB_REG_MULTI_DEMOD_LSB, DIB3000MB_MULTI_DEMOD_LSB); wr(DIB3000MB_REG_OUTPUT_MODE, DIB3000MB_OUTPUT_MODE_SLAVE); wr(DIB3000MB_REG_FIFO_142, DIB3000MB_FIFO_142); wr(DIB3000MB_REG_MPEG2_OUT_MODE, DIB3000MB_MPEG2_OUT_MODE_188); wr(DIB3000MB_REG_PID_PARSE, DIB3000MB_PID_PARSE_ACTIVATE); wr(DIB3000MB_REG_FIFO, DIB3000MB_FIFO_INHIBIT); wr(DIB3000MB_REG_FIFO_146, DIB3000MB_FIFO_146); wr(DIB3000MB_REG_FIFO_147, DIB3000MB_FIFO_147); wr(DIB3000MB_REG_DATA_IN_DIVERSITY, DIB3000MB_DATA_DIVERSITY_IN_OFF); return 0; } static int dib3000mb_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep) { struct dib3000_state* state = fe->demodulator_priv; struct dvb_ofdm_parameters *ofdm = &fep->u.ofdm; fe_code_rate_t *cr; u16 tps_val; int inv_test1,inv_test2; u32 dds_val, threshold = 0x800000; if (!rd(DIB3000MB_REG_TPS_LOCK)) return 0; dds_val = ((rd(DIB3000MB_REG_DDS_VALUE_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_VALUE_LSB); deb_getf("DDS_VAL: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_VALUE_MSB), rd(DIB3000MB_REG_DDS_VALUE_LSB)); if (dds_val < threshold) inv_test1 = 0; else if (dds_val == threshold) inv_test1 = 1; else inv_test1 = 2; dds_val = ((rd(DIB3000MB_REG_DDS_FREQ_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_FREQ_LSB); deb_getf("DDS_FREQ: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_FREQ_MSB), rd(DIB3000MB_REG_DDS_FREQ_LSB)); if (dds_val < threshold) inv_test2 = 0; else if (dds_val == threshold) inv_test2 = 1; else inv_test2 = 2; fep->inversion = ((inv_test2 == 2) && (inv_test1==1 || inv_test1==0)) || ((inv_test2 == 0) && (inv_test1==1 || inv_test1==2)) ? 
INVERSION_ON : INVERSION_OFF; deb_getf("inversion %d %d, %d\n", inv_test2, inv_test1, fep->inversion); switch ((tps_val = rd(DIB3000MB_REG_TPS_QAM))) { case DIB3000_CONSTELLATION_QPSK: deb_getf("QPSK "); ofdm->constellation = QPSK; break; case DIB3000_CONSTELLATION_16QAM: deb_getf("QAM16 "); ofdm->constellation = QAM_16; break; case DIB3000_CONSTELLATION_64QAM: deb_getf("QAM64 "); ofdm->constellation = QAM_64; break; default: err("Unexpected constellation returned by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n", tps_val); if (rd(DIB3000MB_REG_TPS_HRCH)) { deb_getf("HRCH ON\n"); cr = &ofdm->code_rate_LP; ofdm->code_rate_HP = FEC_NONE; switch ((tps_val = rd(DIB3000MB_REG_TPS_VIT_ALPHA))) { case DIB3000_ALPHA_0: deb_getf("HIERARCHY_NONE "); ofdm->hierarchy_information = HIERARCHY_NONE; break; case DIB3000_ALPHA_1: deb_getf("HIERARCHY_1 "); ofdm->hierarchy_information = HIERARCHY_1; break; case DIB3000_ALPHA_2: deb_getf("HIERARCHY_2 "); ofdm->hierarchy_information = HIERARCHY_2; break; case DIB3000_ALPHA_4: deb_getf("HIERARCHY_4 "); ofdm->hierarchy_information = HIERARCHY_4; break; default: err("Unexpected ALPHA value returned by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n", tps_val); tps_val = rd(DIB3000MB_REG_TPS_CODE_RATE_LP); } else { deb_getf("HRCH OFF\n"); cr = &ofdm->code_rate_HP; ofdm->code_rate_LP = FEC_NONE; ofdm->hierarchy_information = HIERARCHY_NONE; tps_val = rd(DIB3000MB_REG_TPS_CODE_RATE_HP); } switch (tps_val) { case DIB3000_FEC_1_2: deb_getf("FEC_1_2 "); *cr = FEC_1_2; break; case DIB3000_FEC_2_3: deb_getf("FEC_2_3 "); *cr = FEC_2_3; break; case DIB3000_FEC_3_4: deb_getf("FEC_3_4 "); *cr = FEC_3_4; break; case DIB3000_FEC_5_6: deb_getf("FEC_5_6 "); *cr = FEC_4_5; break; case DIB3000_FEC_7_8: deb_getf("FEC_7_8 "); *cr = FEC_7_8; break; default: err("Unexpected FEC returned by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n",tps_val); switch ((tps_val = rd(DIB3000MB_REG_TPS_GUARD_TIME))) { case DIB3000_GUARD_TIME_1_32: 
deb_getf("GUARD_INTERVAL_1_32 "); ofdm->guard_interval = GUARD_INTERVAL_1_32; break; case DIB3000_GUARD_TIME_1_16: deb_getf("GUARD_INTERVAL_1_16 "); ofdm->guard_interval = GUARD_INTERVAL_1_16; break; case DIB3000_GUARD_TIME_1_8: deb_getf("GUARD_INTERVAL_1_8 "); ofdm->guard_interval = GUARD_INTERVAL_1_8; break; case DIB3000_GUARD_TIME_1_4: deb_getf("GUARD_INTERVAL_1_4 "); ofdm->guard_interval = GUARD_INTERVAL_1_4; break; default: err("Unexpected Guard Time returned by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n", tps_val); switch ((tps_val = rd(DIB3000MB_REG_TPS_FFT))) { case DIB3000_TRANSMISSION_MODE_2K: deb_getf("TRANSMISSION_MODE_2K "); ofdm->transmission_mode = TRANSMISSION_MODE_2K; break; case DIB3000_TRANSMISSION_MODE_8K: deb_getf("TRANSMISSION_MODE_8K "); ofdm->transmission_mode = TRANSMISSION_MODE_8K; break; default: err("unexpected transmission mode return by TPS (%d)", tps_val); break; } deb_getf("TPS: %d\n", tps_val); return 0; } static int dib3000mb_read_status(struct dvb_frontend* fe, fe_status_t *stat) { struct dib3000_state* state = fe->demodulator_priv; *stat = 0; if (rd(DIB3000MB_REG_AGC_LOCK)) *stat |= FE_HAS_SIGNAL; if (rd(DIB3000MB_REG_CARRIER_LOCK)) *stat |= FE_HAS_CARRIER; if (rd(DIB3000MB_REG_VIT_LCK)) *stat |= FE_HAS_VITERBI; if (rd(DIB3000MB_REG_TS_SYNC_LOCK)) *stat |= (FE_HAS_SYNC | FE_HAS_LOCK); deb_getf("actual status is %2x\n",*stat); deb_getf("autoval: tps: %d, qam: %d, hrch: %d, alpha: %d, hp: %d, lp: %d, guard: %d, fft: %d cell: %d\n", rd(DIB3000MB_REG_TPS_LOCK), rd(DIB3000MB_REG_TPS_QAM), rd(DIB3000MB_REG_TPS_HRCH), rd(DIB3000MB_REG_TPS_VIT_ALPHA), rd(DIB3000MB_REG_TPS_CODE_RATE_HP), rd(DIB3000MB_REG_TPS_CODE_RATE_LP), rd(DIB3000MB_REG_TPS_GUARD_TIME), rd(DIB3000MB_REG_TPS_FFT), rd(DIB3000MB_REG_TPS_CELL_ID)); //*stat = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK; return 0; } static int dib3000mb_read_ber(struct dvb_frontend* fe, u32 *ber) { struct dib3000_state* state = fe->demodulator_priv; 
*ber = ((rd(DIB3000MB_REG_BER_MSB) << 16) | rd(DIB3000MB_REG_BER_LSB)); return 0; } /* see dib3000-watch dvb-apps for exact calcuations of signal_strength and snr */ static int dib3000mb_read_signal_strength(struct dvb_frontend* fe, u16 *strength) { struct dib3000_state* state = fe->demodulator_priv; *strength = rd(DIB3000MB_REG_SIGNAL_POWER) * 0xffff / 0x170; return 0; } static int dib3000mb_read_snr(struct dvb_frontend* fe, u16 *snr) { struct dib3000_state* state = fe->demodulator_priv; short sigpow = rd(DIB3000MB_REG_SIGNAL_POWER); int icipow = ((rd(DIB3000MB_REG_NOISE_POWER_MSB) & 0xff) << 16) | rd(DIB3000MB_REG_NOISE_POWER_LSB); *snr = (sigpow << 8) / ((icipow > 0) ? icipow : 1); return 0; } static int dib3000mb_read_unc_blocks(struct dvb_frontend* fe, u32 *unc) { struct dib3000_state* state = fe->demodulator_priv; *unc = rd(DIB3000MB_REG_PACKET_ERROR_RATE); return 0; } static int dib3000mb_sleep(struct dvb_frontend* fe) { struct dib3000_state* state = fe->demodulator_priv; deb_info("dib3000mb is going to bed.\n"); wr(DIB3000MB_REG_POWER_CONTROL, DIB3000MB_POWER_DOWN); return 0; } static int dib3000mb_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 800; return 0; } static int dib3000mb_fe_init_nonmobile(struct dvb_frontend* fe) { return dib3000mb_fe_init(fe, 0); } static int dib3000mb_set_frontend_and_tuner(struct dvb_frontend* fe, struct dvb_frontend_parameters *fep) { return dib3000mb_set_frontend(fe, fep, 1); } static void dib3000mb_release(struct dvb_frontend* fe) { struct dib3000_state *state = fe->demodulator_priv; kfree(state); } /* pid filter and transfer stuff */ static int dib3000mb_pid_control(struct dvb_frontend *fe,int index, int pid,int onoff) { struct dib3000_state *state = fe->demodulator_priv; pid = (onoff ? 
pid | DIB3000_ACTIVATE_PID_FILTERING : 0); wr(index+DIB3000MB_REG_FIRST_PID,pid); return 0; } static int dib3000mb_fifo_control(struct dvb_frontend *fe, int onoff) { struct dib3000_state *state = fe->demodulator_priv; deb_xfer("%s fifo\n",onoff ? "enabling" : "disabling"); if (onoff) { wr(DIB3000MB_REG_FIFO, DIB3000MB_FIFO_ACTIVATE); } else { wr(DIB3000MB_REG_FIFO, DIB3000MB_FIFO_INHIBIT); } return 0; } static int dib3000mb_pid_parse(struct dvb_frontend *fe, int onoff) { struct dib3000_state *state = fe->demodulator_priv; deb_xfer("%s pid parsing\n",onoff ? "enabling" : "disabling"); wr(DIB3000MB_REG_PID_PARSE,onoff); return 0; } static int dib3000mb_tuner_pass_ctrl(struct dvb_frontend *fe, int onoff, u8 pll_addr) { struct dib3000_state *state = fe->demodulator_priv; if (onoff) { wr(DIB3000MB_REG_TUNER, DIB3000_TUNER_WRITE_ENABLE(pll_addr)); } else { wr(DIB3000MB_REG_TUNER, DIB3000_TUNER_WRITE_DISABLE(pll_addr)); } return 0; } static struct dvb_frontend_ops dib3000mb_ops; struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config, struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops) { struct dib3000_state* state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct dib3000_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->i2c = i2c; memcpy(&state->config,config,sizeof(struct dib3000_config)); /* check for the correct demod */ if (rd(DIB3000_REG_MANUFACTOR_ID) != DIB3000_I2C_ID_DIBCOM) goto error; if (rd(DIB3000_REG_DEVICE_ID) != DIB3000MB_DEVICE_ID) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &dib3000mb_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; /* set the xfer operations */ xfer_ops->pid_parse = dib3000mb_pid_parse; xfer_ops->fifo_ctrl = dib3000mb_fifo_control; xfer_ops->pid_ctrl = dib3000mb_pid_control; xfer_ops->tuner_pass_ctrl = dib3000mb_tuner_pass_ctrl; return &state->frontend; error: kfree(state); return NULL; } static 
struct dvb_frontend_ops dib3000mb_ops = { .info = { .name = "DiBcom 3000M-B DVB-T", .type = FE_OFDM, .frequency_min = 44250000, .frequency_max = 867250000, .frequency_stepsize = 62500, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO, }, .release = dib3000mb_release, .init = dib3000mb_fe_init_nonmobile, .sleep = dib3000mb_sleep, .set_frontend = dib3000mb_set_frontend_and_tuner, .get_frontend = dib3000mb_get_frontend, .get_tune_settings = dib3000mb_fe_get_tune_settings, .read_status = dib3000mb_read_status, .read_ber = dib3000mb_read_ber, .read_signal_strength = dib3000mb_read_signal_strength, .read_snr = dib3000mb_read_snr, .read_ucblocks = dib3000mb_read_unc_blocks, }; MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(dib3000mb_attach);
gpl-2.0
houst0nn/android_kernel_lge_g3
drivers/mtd/nand/nand_ids.c
2149
7553
/*
 *  drivers/mtd/nandids.c
 *
 *  Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/mtd/nand.h>

/*
 * Chip ID list
 *
 * Per entry: name, device ID code, pagesize, chipsize in MegaByte,
 * eraseblock size, options.
 *
 * Pagesize meaning:
 *   0	  get this information from the extended chip ID
 *   256  256 Byte page size
 *   512  512 Byte page size
 *
 * The detection code scans this table front to back and uses the FIRST
 * entry whose ID matches, so the order of duplicate IDs is significant.
 */
struct nand_flash_dev nand_flash_ids[] = {

#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
	/* Very old small-capacity parts, only when explicitly configured. */
	{"NAND 1MiB 5V 8-bit",		0x6e, 256, 1, 0x1000, 0},
	{"NAND 2MiB 5V 8-bit",		0x64, 256, 2, 0x1000, 0},
	{"NAND 4MiB 5V 8-bit",		0x6b, 512, 4, 0x2000, 0},
	{"NAND 1MiB 3,3V 8-bit",	0xe8, 256, 1, 0x1000, 0},
	{"NAND 1MiB 3,3V 8-bit",	0xec, 256, 1, 0x1000, 0},
	{"NAND 2MiB 3,3V 8-bit",	0xea, 256, 2, 0x1000, 0},
	{"NAND 4MiB 3,3V 8-bit",	0xd5, 512, 4, 0x2000, 0},
	{"NAND 4MiB 3,3V 8-bit",	0xe3, 512, 4, 0x2000, 0},
	{"NAND 4MiB 3,3V 8-bit",	0xe5, 512, 4, 0x2000, 0},
	{"NAND 8MiB 3,3V 8-bit",	0xd6, 512, 8, 0x2000, 0},

	{"NAND 8MiB 1,8V 8-bit",	0x39, 512, 8, 0x2000, 0},
	{"NAND 8MiB 3,3V 8-bit",	0xe6, 512, 8, 0x2000, 0},
	{"NAND 8MiB 1,8V 16-bit",	0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
	{"NAND 8MiB 3,3V 16-bit",	0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
#endif

	/* Small-page (512 byte) devices with fixed geometry. */
	{"NAND 16MiB 1,8V 8-bit",	0x33, 512, 16, 0x4000, 0},
	{"NAND 16MiB 3,3V 8-bit",	0x73, 512, 16, 0x4000, 0},
	{"NAND 16MiB 1,8V 16-bit",	0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
	{"NAND 16MiB 3,3V 16-bit",	0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},

	{"NAND 32MiB 1,8V 8-bit",	0x35, 512, 32, 0x4000, 0},
	{"NAND 32MiB 3,3V 8-bit",	0x75, 512, 32, 0x4000, 0},
	{"NAND 32MiB 1,8V 16-bit",	0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
	{"NAND 32MiB 3,3V 16-bit",	0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},

	{"NAND 64MiB 1,8V 8-bit",	0x36, 512, 64, 0x4000, 0},
	{"NAND 64MiB 3,3V 8-bit",	0x76, 512, 64, 0x4000, 0},
	{"NAND 64MiB 1,8V 16-bit",	0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
	{"NAND 64MiB 3,3V 16-bit",	0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},

	{"NAND 128MiB 1,8V 8-bit",	0x78, 512, 128, 0x4000, 0},
	{"NAND 128MiB 1,8V 8-bit",	0x39, 512, 128, 0x4000, 0},
	{"NAND 128MiB 3,3V 8-bit",	0x79, 512, 128, 0x4000, 0},
	{"NAND 128MiB 1,8V 16-bit",	0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
	{"NAND 128MiB 1,8V 16-bit",	0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
	{"NAND 128MiB 3,3V 16-bit",	0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
	{"NAND 128MiB 3,3V 16-bit",	0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},

	{"NAND 256MiB 3,3V 8-bit",	0x71, 512, 256, 0x4000, 0},

	/*
	 * These are the new chips with large page size. The pagesize and the
	 * erasesize is determined from the extended id bytes
	 */
#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)

	/* 512 Megabit */
	{"NAND 64MiB 1,8V 8-bit",	0xA2, 0,  64, 0, LP_OPTIONS},
	{"NAND 64MiB 1,8V 8-bit",	0xA0, 0,  64, 0, LP_OPTIONS},
	{"NAND 64MiB 3,3V 8-bit",	0xF2, 0,  64, 0, LP_OPTIONS},
	{"NAND 64MiB 3,3V 8-bit",	0xD0, 0,  64, 0, LP_OPTIONS},
	{"NAND 64MiB 3,3V 8-bit",	0xF0, 0,  64, 0, LP_OPTIONS},
	{"NAND 64MiB 1,8V 16-bit",	0xB2, 0,  64, 0, LP_OPTIONS16},
	{"NAND 64MiB 1,8V 16-bit",	0xB0, 0,  64, 0, LP_OPTIONS16},
	{"NAND 64MiB 3,3V 16-bit",	0xC2, 0,  64, 0, LP_OPTIONS16},
	{"NAND 64MiB 3,3V 16-bit",	0xC0, 0,  64, 0, LP_OPTIONS16},

	/* 1 Gigabit */
	{"NAND 128MiB 1,8V 8-bit",	0xA1, 0, 128, 0, LP_OPTIONS},
	{"NAND 128MiB 3,3V 8-bit",	0xF1, 0, 128, 0, LP_OPTIONS},
	{"NAND 128MiB 3,3V 8-bit",	0xD1, 0, 128, 0, LP_OPTIONS},
	{"NAND 128MiB 1,8V 16-bit",	0xB1, 0, 128, 0, LP_OPTIONS16},
	{"NAND 128MiB 3,3V 16-bit",	0xC1, 0, 128, 0, LP_OPTIONS16},
	{"NAND 128MiB 1,8V 16-bit",     0xAD, 0, 128, 0, LP_OPTIONS16},

	/* 2 Gigabit */
	{"NAND 256MiB 1,8V 8-bit",	0xAA, 0, 256, 0, LP_OPTIONS},
	{"NAND 256MiB 3,3V 8-bit",	0xDA, 0, 256, 0, LP_OPTIONS},
	{"NAND 256MiB 1,8V 16-bit",	0xBA, 0, 256, 0, LP_OPTIONS16},
	{"NAND 256MiB 3,3V 16-bit",	0xCA, 0, 256, 0, LP_OPTIONS16},

	/* 4 Gigabit */
	{"NAND 512MiB 1,8V 8-bit",	0xAC, 0, 512, 0, LP_OPTIONS},
	{"NAND 512MiB 3,3V 8-bit",	0xDC, 0, 512, 0, LP_OPTIONS},
	{"NAND 512MiB 1,8V 16-bit",	0xBC, 0, 512, 0, LP_OPTIONS16},
	{"NAND 512MiB 3,3V 16-bit",	0xCC, 0, 512, 0, LP_OPTIONS16},

	/* 8 Gigabit */
	{"NAND 1GiB 1,8V 8-bit",	0xA3, 0, 1024, 0, LP_OPTIONS},
	{"NAND 1GiB 3,3V 8-bit",	0xD3, 0, 1024, 0, LP_OPTIONS},
	{"NAND 1GiB 1,8V 16-bit",	0xB3, 0, 1024, 0, LP_OPTIONS16},
	{"NAND 1GiB 3,3V 16-bit",	0xC3, 0, 1024, 0, LP_OPTIONS16},

	/* 16 Gigabit */
	{"NAND 2GiB 1,8V 8-bit",	0xA5, 0, 2048, 0, LP_OPTIONS},
	{"NAND 2GiB 3,3V 8-bit",	0xD5, 0, 2048, 0, LP_OPTIONS},
	{"NAND 2GiB 1,8V 16-bit",	0xB5, 0, 2048, 0, LP_OPTIONS16},
	{"NAND 2GiB 3,3V 16-bit",	0xC5, 0, 2048, 0, LP_OPTIONS16},

	/* 32 Gigabit */
	{"NAND 4GiB 1,8V 8-bit",	0xA7, 0, 4096, 0, LP_OPTIONS},
	{"NAND 4GiB 3,3V 8-bit",	0xD7, 0, 4096, 0, LP_OPTIONS},
	{"NAND 4GiB 1,8V 16-bit",	0xB7, 0, 4096, 0, LP_OPTIONS16},
	{"NAND 4GiB 3,3V 16-bit",	0xC7, 0, 4096, 0, LP_OPTIONS16},

	/* 64 Gigabit */
	{"NAND 8GiB 1,8V 8-bit",	0xAE, 0, 8192, 0, LP_OPTIONS},
	{"NAND 8GiB 3,3V 8-bit",	0xDE, 0, 8192, 0, LP_OPTIONS},
	{"NAND 8GiB 1,8V 16-bit",	0xBE, 0, 8192, 0, LP_OPTIONS16},
	{"NAND 8GiB 3,3V 16-bit",	0xCE, 0, 8192, 0, LP_OPTIONS16},

	/* 128 Gigabit */
	{"NAND 16GiB 1,8V 8-bit",	0x1A, 0, 16384, 0, LP_OPTIONS},
	{"NAND 16GiB 3,3V 8-bit",	0x3A, 0, 16384, 0, LP_OPTIONS},
	{"NAND 16GiB 1,8V 16-bit",	0x2A, 0, 16384, 0, LP_OPTIONS16},
	{"NAND 16GiB 3,3V 16-bit",	0x4A, 0, 16384, 0, LP_OPTIONS16},

	/* 256 Gigabit */
	{"NAND 32GiB 1,8V 8-bit",	0x1C, 0, 32768, 0, LP_OPTIONS},
	{"NAND 32GiB 3,3V 8-bit",	0x3C, 0, 32768, 0, LP_OPTIONS},
	{"NAND 32GiB 1,8V 16-bit",	0x2C, 0, 32768, 0, LP_OPTIONS16},
	{"NAND 32GiB 3,3V 16-bit",	0x4C, 0, 32768, 0, LP_OPTIONS16},

	/* 512 Gigabit */
	{"NAND 64GiB 1,8V 8-bit",	0x1E, 0, 65536, 0, LP_OPTIONS},
	{"NAND 64GiB 3,3V 8-bit",	0x3E, 0, 65536, 0, LP_OPTIONS},
	{"NAND 64GiB 1,8V 16-bit",	0x2E, 0, 65536, 0, LP_OPTIONS16},
	{"NAND 64GiB 3,3V 16-bit",	0x4E, 0, 65536, 0, LP_OPTIONS16},

	/*
	 * Renesas AND 1 Gigabit. Those chips do not support extended id and
	 * have a strange page/block layout !  The chosen minimum erasesize is
	 * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
	 * planes 1 block = 2 pages, but due to plane arrangement the blocks
	 * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
	 * increase the eraseblock size so we chose a combined one which can be
	 * erased in one go There are more speed improvements for reads and
	 * writes possible, but not implemented now
	 */
	{"AND 128MiB 3,3V 8-bit",	0x01, 2048, 128, 0x4000,
	 NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
	 BBT_AUTO_REFRESH
	},

	/*
	 * NOTE(review): vendor-fork addition. ID 0xAC is already claimed by
	 * the "NAND 512MiB 1,8V 8-bit" large-page entry above, and the scan
	 * stops at the first match, so this entry appears unreachable as
	 * ordered — confirm intent with whoever added it.
	 */
	{"NAND 4GiB 1,8V 8-bit",	0xAC, 2048, 4096, 0x20000, 0},

	{NULL,}	/* table terminator */
};

/*
 * Manufacturer ID list
 */
struct nand_manufacturers nand_manuf_ids[] = {
	{NAND_MFR_TOSHIBA, "Toshiba"},
	{NAND_MFR_SAMSUNG, "Samsung"},
	{NAND_MFR_FUJITSU, "Fujitsu"},
	{NAND_MFR_NATIONAL, "National"},
	{NAND_MFR_RENESAS, "Renesas"},
	{NAND_MFR_STMICRO, "ST Micro"},
	{NAND_MFR_HYNIX, "Hynix"},
	{NAND_MFR_MICRON, "Micron"},
	{NAND_MFR_AMD, "AMD"},
	{NAND_MFR_MACRONIX, "Macronix"},
	{0x0, "Unknown"}	/* sentinel: must stay last */
};

EXPORT_SYMBOL(nand_manuf_ids);
EXPORT_SYMBOL(nand_flash_ids);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Nand device & manufacturer IDs");
gpl-2.0
NovaFusion/twrp_kernel
drivers/isdn/i4l/isdn_ppp.c
3173
80387
/* $Id: isdn_ppp.c,v 1.1.2.3 2004/02/10 01:07:13 keil Exp $
 *
 * Linux ISDN subsystem, functions for synchronous PPP (linklevel).
 *
 * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/isdn.h>
#include <linux/poll.h>
#include <linux/ppp-comp.h>
#include <linux/slab.h>
#ifdef CONFIG_IPPP_FILTER
#include <linux/filter.h>
#endif

#include "isdn_common.h"
#include "isdn_ppp.h"
#include "isdn_net.h"

#ifndef PPP_IPX
#define PPP_IPX 0x002b
#endif

/* Prototypes */
static int isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot);
static int isdn_ppp_closewait(int slot);
static void isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp,
                                 struct sk_buff *skb, int proto);
static int isdn_ppp_if_get_unit(char *namebuf);
static int isdn_ppp_set_compressor(struct ippp_struct *is,struct isdn_ppp_comp_data *);
static struct sk_buff *isdn_ppp_decompress(struct sk_buff *,
                                struct ippp_struct *,struct ippp_struct *,int *proto);
static void isdn_ppp_receive_ccp(isdn_net_dev * net_dev, isdn_net_local * lp,
                                struct sk_buff *skb,int proto);
static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in,int *proto,
        struct ippp_struct *is,struct ippp_struct *master,int type);
static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
         struct sk_buff *skb);

/* New CCP stuff */
static void isdn_ppp_ccp_kickup(struct ippp_struct *is);
static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
                                    unsigned char code, unsigned char id,
                                    unsigned char *data, int len);
static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
                                          unsigned char id);
static void isdn_ppp_ccp_timer_callback(unsigned long closure);
static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
                                                                   unsigned char id);
static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
                                     struct isdn_ppp_resetparams *rp);
static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is,
                                        unsigned char id);

#ifdef CONFIG_ISDN_MPP
static ippp_bundle * isdn_ppp_bundle_arr = NULL;

static int isdn_ppp_mp_bundle_array_init(void);
static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to );
static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
                                struct sk_buff *skb);
static void isdn_ppp_mp_cleanup( isdn_net_local * lp );

static int isdn_ppp_bundle(struct ippp_struct *, int unit);
#endif	/* CONFIG_ISDN_MPP */

char *isdn_ppp_revision = "$Revision: 1.1.2.3 $";

/* One ippp_struct per possible ipppd slot; allocated in isdn_ppp_init(). */
static struct ippp_struct *ippp_table[ISDN_MAX_CHANNELS];

/* Head of the registered compressor list (isdn_ppp_set_compressor walks it). */
static struct isdn_ppp_compressor *ipc_head = NULL;

/*
 * frame log (debug)
 * Hex-dumps up to maxlen bytes of data, 16 bytes per printk line.
 */
static void
isdn_ppp_frame_log(char *info, char *data, int len, int maxlen,int unit,int slot)
{
	int cnt,
	 j, i;
	char buf[80];

	if (len < maxlen)
		maxlen = len;

	for (i = 0, cnt = 0; cnt < maxlen; i++) {
		for (j = 0; j < 16 && cnt < maxlen; j++, cnt++)
			sprintf(buf + j * 3, "%02x ", (unsigned char) data[cnt]);
		printk(KERN_DEBUG "[%d/%d].%s[%d]: %s\n",unit,slot, info, i, buf);
	}
}

/*
 * unbind isdn_net_local <=> ippp-device
 * note: it can happen, that we hangup/free the master before the slaves
 * in this case we bind another lp to the master device
 *
 * Returns 0 in all cases (errors are only logged).
 */
int
isdn_ppp_free(isdn_net_local * lp)
{
	struct ippp_struct *is;

	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
			__func__, lp->ppp_slot);
		return 0;
	}

#ifdef CONFIG_ISDN_MPP
	spin_lock(&lp->netdev->pb->lock);
#endif
	isdn_net_rm_from_bundle(lp);
#ifdef CONFIG_ISDN_MPP
	if (lp->netdev->pb->ref_ct == 1)	/* last link in queue? */
		isdn_ppp_mp_cleanup(lp);

	lp->netdev->pb->ref_ct--;
	spin_unlock(&lp->netdev->pb->lock);
#endif /* CONFIG_ISDN_MPP */
	/* Re-check: isdn_net_rm_from_bundle() may have rebound slots. */
	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n",
			__func__, lp->ppp_slot);
		return 0;
	}
	is = ippp_table[lp->ppp_slot];
	if ((is->state & IPPP_CONNECT))
		isdn_ppp_closewait(lp->ppp_slot);	/* force wakeup on ippp device */
	else if (is->state & IPPP_ASSIGNED)
		is->state = IPPP_OPEN;	/* fallback to 'OPEN but not ASSIGNED' state */

	if (is->debug & 0x1)
		printk(KERN_DEBUG "isdn_ppp_free %d %lx %lx\n",
			lp->ppp_slot, (long) lp, (long) is->lp);

	is->lp = NULL;          /* link is down .. set lp to NULL */
	lp->ppp_slot = -1;      /* is this OK ?? */

	return 0;
}

/*
 * bind isdn_net_local <=> ippp-device
 *
 * This function is allways called with holding dev->lock so
 * no additional lock is needed
 *
 * Returns the bound ppp slot index, or -1 on failure.
 */
int
isdn_ppp_bind(isdn_net_local * lp)
{
	int i;
	int unit = 0;
	struct ippp_struct *is;
	int retval;

	if (lp->pppbind < 0) {  /* device bounded to ippp device ? */
		isdn_net_dev *net_dev = dev->netdev;
		char exclusive[ISDN_MAX_CHANNELS];	/* exclusive flags */
		memset(exclusive, 0, ISDN_MAX_CHANNELS);
		while (net_dev) {	/* step through net devices to find exclusive minors */
			isdn_net_local *lp = net_dev->local;
			if (lp->pppbind >= 0)
				exclusive[lp->pppbind] = 1;
			net_dev = net_dev->next;
		}
		/*
		 * search a free device / slot
		 * NOTE(review): state is compared with == here, so a slot that is
		 * OPEN|ASSIGNED is deliberately not considered free.
		 */
		for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
			if (ippp_table[i]->state == IPPP_OPEN &&
			    !exclusive[ippp_table[i]->minor]) {	/* OPEN, but not connected! */
				break;
			}
		}
	} else {
		/* lp->pppbind names a specific minor: find that exact device */
		for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
			if (ippp_table[i]->minor == lp->pppbind &&
			    (ippp_table[i]->state & IPPP_OPEN) == IPPP_OPEN)
				break;
		}
	}

	if (i >= ISDN_MAX_CHANNELS) {
		printk(KERN_WARNING "isdn_ppp_bind: Can't find a (free) connection to the ipppd daemon.\n");
		retval = -1;
		goto out;
	}
	/* get unit number from interface name .. ugly! */
	unit = isdn_ppp_if_get_unit(lp->netdev->dev->name);
	if (unit < 0) {
		printk(KERN_ERR "isdn_ppp_bind: illegal interface name %s.\n",
			lp->netdev->dev->name);
		retval = -1;
		goto out;
	}

	lp->ppp_slot = i;
	is = ippp_table[i];
	is->lp = lp;
	is->unit = unit;
	is->state = IPPP_OPEN | IPPP_ASSIGNED;	/* assigned to a netdevice but not connected */
#ifdef CONFIG_ISDN_MPP
	retval = isdn_ppp_mp_init(lp, NULL);
	if (retval < 0)
		goto out;
#endif /* CONFIG_ISDN_MPP */

	retval = lp->ppp_slot;

 out:
	return retval;
}

/*
 * kick the ipppd on the device
 * (wakes up daemon after B-channel connect)
 */
void
isdn_ppp_wakeup_daemon(isdn_net_local * lp)
{
	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
			__func__, lp->ppp_slot);
		return;
	}
	/* IPPP_NOBLOCK makes the next poll() report readable even with an
	 * empty queue, so the daemon notices the connect. */
	ippp_table[lp->ppp_slot]->state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK;
	wake_up_interruptible(&ippp_table[lp->ppp_slot]->wq);
}

/*
 * there was a hangup on the netdevice
 * force wakeup of the ippp device
 * go into 'device waits for release' state
 */
static int
isdn_ppp_closewait(int slot)
{
	struct ippp_struct *is;

	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: slot(%d) out of range\n",
			__func__, slot);
		return 0;
	}
	is = ippp_table[slot];
	if (is->state)
		wake_up_interruptible(&is->wq);
	is->state = IPPP_CLOSEWAIT;
	return 1;
}

/*
 * isdn_ppp_find_slot / isdn_ppp_free_slot
 * Returns the index of the first unused slot, or -1 if all are busy.
 */
static int
isdn_ppp_get_slot(void)
{
	int i;
	for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
		if (!ippp_table[i]->state)
			return i;
	}
	return -1;
}

/*
 * isdn_ppp_open
 * Claims a free slot for minor 'min' and resets its per-connection state.
 * Returns 0, -ENODEV for a bad minor, or -EBUSY when no slot is free.
 */
int
isdn_ppp_open(int min, struct file *file)
{
	int slot;
	struct ippp_struct *is;

	if (min < 0 || min >= ISDN_MAX_CHANNELS)
		return -ENODEV;

	slot = isdn_ppp_get_slot();
	if (slot < 0) {
		return -EBUSY;
	}
	is = file->private_data = ippp_table[slot];

	printk(KERN_DEBUG "ippp, open, slot: %d, minor: %d, state: %04x\n",
	       slot, min, is->state);

	/* compression stuff */
	is->link_compressor   = is->compressor = NULL;
	is->link_decompressor = is->decompressor = NULL;
	is->link_comp_stat    = is->comp_stat = NULL;
	is->link_decomp_stat  = is->decomp_stat = NULL;
	is->compflags = 0;

	is->reset = isdn_ppp_ccp_reset_alloc(is);

	is->lp = NULL;
	is->mp_seqno = 0;       /* MP sequence number */
	is->pppcfg = 0;         /* ppp configuration */
	is->mpppcfg = 0;        /* mppp configuration */
	is->last_link_seqno = -1;	/* MP: maybe set to Bundle-MIN, when joining a bundle ?? */
	is->unit = -1;          /* set, when we have our interface */
	is->mru = 1524;         /* MRU, default 1524 */
	is->maxcid = 16;        /* VJ: maxcid */
	is->tk = current;
	init_waitqueue_head(&is->wq);
	is->first = is->rq + NUM_RCV_BUFFS - 1;	/* receive queue */
	is->last = is->rq;
	is->minor = min;
#ifdef CONFIG_ISDN_PPP_VJ
	/*
	 * VJ header compression init
	 */
	is->slcomp = slhc_init(16, 16);	/* not necessary for 2. link in bundle */
#endif
#ifdef CONFIG_IPPP_FILTER
	is->pass_filter = NULL;
	is->active_filter = NULL;
#endif
	is->state = IPPP_OPEN;

	return 0;
}

/*
 * release ippp device
 * Hangs up a still-active link, drains the receive queue and frees all
 * per-connection resources, then marks the slot free (state = 0).
 */
void
isdn_ppp_release(int min, struct file *file)
{
	int i;
	struct ippp_struct *is;

	if (min < 0 || min >= ISDN_MAX_CHANNELS)
		return;
	is = file->private_data;

	if (!is) {
		printk(KERN_ERR "%s: no file->private_data\n", __func__);
		return;
	}
	if (is->debug & 0x1)
		printk(KERN_DEBUG "ippp: release, minor: %d %lx\n",
			min, (long) is->lp);

	if (is->lp) {           /* a lp address says: this link is still up */
		isdn_net_dev *p = is->lp->netdev;

		if (!p) {
			printk(KERN_ERR "%s: no lp->netdev\n", __func__);
			return;
		}
		is->state &= ~IPPP_CONNECT;	/* -> effect: no call of wakeup */
		/*
		 * isdn_net_hangup() calls isdn_ppp_free()
		 * isdn_ppp_free() sets is->lp to NULL and lp->ppp_slot to -1
		 * removing the IPPP_CONNECT flag omits calling of isdn_ppp_wakeup_daemon()
		 */
		isdn_net_hangup(p->dev);
	}
	for (i = 0; i < NUM_RCV_BUFFS; i++) {
		kfree(is->rq[i].buf);
		is->rq[i].buf = NULL;
	}
	is->first = is->rq + NUM_RCV_BUFFS - 1;	/* receive queue */
	is->last = is->rq;

#ifdef CONFIG_ISDN_PPP_VJ
/* TODO: if this was the previous master: link the slcomp to the new master */
	slhc_free(is->slcomp);
	is->slcomp = NULL;
#endif
#ifdef CONFIG_IPPP_FILTER
	kfree(is->pass_filter);
	is->pass_filter = NULL;
	kfree(is->active_filter);
	is->active_filter = NULL;
#endif

/* TODO: if this was the previous master: link the stuff to the new master */
	if(is->comp_stat)
		is->compressor->free(is->comp_stat);
	if(is->link_comp_stat)
		is->link_compressor->free(is->link_comp_stat);
	if(is->link_decomp_stat)
		is->link_decompressor->free(is->link_decomp_stat);
	if(is->decomp_stat)
		is->decompressor->free(is->decomp_stat);
	is->compressor   = is->link_compressor   = NULL;
	is->decompressor = is->link_decompressor = NULL;
	is->comp_stat    = is->link_comp_stat    = NULL;
	is->decomp_stat  = is->link_decomp_stat  = NULL;

	/* Clean up if necessary */
	if(is->reset)
		isdn_ppp_ccp_reset_free(is);

	/* this slot is ready for new connections */
	is->state = 0;
}

/*
 * get_arg .. ioctl helper
 * Copies len bytes (or a pointer-size default for len <= 0) from user space.
 */
static int get_arg(void __user *b, void *val, int len)
{
	if (len <= 0)
		len = sizeof(void *);
	if (copy_from_user(val, b, len))
		return -EFAULT;
	return 0;
}

/*
 * set arg ..
ioctl helper
 * Copies len bytes (or a pointer-size default for len <= 0) to user space.
 */
static int set_arg(void __user *b, void *val,int len)
{
	if(len <= 0)
		len = sizeof(void *);
	if (copy_to_user(b, val, len))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_IPPP_FILTER
/*
 * Fetch a BPF filter program from user space and validate it.
 * On success stores the kmalloc'ed program in *p (NULL for an empty
 * program) and returns its length in instructions; caller owns *p.
 */
static int get_filter(void __user *arg, struct sock_filter **p)
{
	struct sock_fprog uprog;
	struct sock_filter *code = NULL;
	int len, err;

	if (copy_from_user(&uprog, arg, sizeof(uprog)))
		return -EFAULT;

	if (!uprog.len) {
		*p = NULL;
		return 0;
	}

	/* uprog.len is unsigned short, so no overflow here */
	len = uprog.len * sizeof(struct sock_filter);
	code = memdup_user(uprog.filter, len);
	if (IS_ERR(code))
		return PTR_ERR(code);

	err = sk_chk_filter(code, uprog.len);
	if (err) {
		kfree(code);
		return err;
	}

	*p = code;
	return uprog.len;
}
#endif /* CONFIG_IPPP_FILTER */

/*
 * ippp device ioctl
 * Dispatches the PPPIOC* ioctls issued by ipppd on an /dev/ippp* minor.
 * Returns 0 on success or a negative errno (PPPIOCBUNDLE returns -1 when
 * MPP support is not compiled in).
 */
int
isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long val;
	int r,i,j;
	struct ippp_struct *is;
	isdn_net_local *lp;
	struct isdn_ppp_comp_data data;
	void __user *argp = (void __user *)arg;

	is = file->private_data;
	lp = is->lp;

	if (is->debug & 0x1)
		printk(KERN_DEBUG "isdn_ppp_ioctl: minor: %d cmd: %x state: %x\n",
		       min, cmd, is->state);

	if (!(is->state & IPPP_OPEN))
		return -EINVAL;

	switch (cmd) {
		case PPPIOCBUNDLE:
#ifdef CONFIG_ISDN_MPP
			if (!(is->state & IPPP_CONNECT))
				return -EINVAL;
			if ((r = get_arg(argp, &val, sizeof(val) )))
				return r;
			printk(KERN_DEBUG "iPPP-bundle: minor: %d, slave unit: %d, master unit: %d\n",
			       (int) min, (int) is->unit, (int) val);
			return isdn_ppp_bundle(is, val);
#else
			return -1;
#endif
			break;
		case PPPIOCGUNIT:	/* get ppp/isdn unit number */
			if ((r = set_arg(argp, &is->unit, sizeof(is->unit) )))
				return r;
			break;
		case PPPIOCGIFNAME:
			if(!lp)
				return -EINVAL;
			if ((r = set_arg(argp, lp->netdev->dev->name,
				strlen(lp->netdev->dev->name))))
				return r;
			break;
		case PPPIOCGMPFLAGS:	/* get configuration flags */
			if ((r = set_arg(argp, &is->mpppcfg, sizeof(is->mpppcfg) )))
				return r;
			break;
		case PPPIOCSMPFLAGS:	/* set configuration flags */
			if ((r = get_arg(argp, &val, sizeof(val) )))
				return r;
			is->mpppcfg = val;
			break;
		case PPPIOCGFLAGS:	/* get configuration flags */
			if ((r = set_arg(argp, &is->pppcfg,sizeof(is->pppcfg) )))
				return r;
			break;
		case PPPIOCSFLAGS:	/* set configuration flags */
			if ((r = get_arg(argp, &val, sizeof(val) ))) {
				return r;
			}
			/* Enabling IP on a connected link restarts the tx queue. */
			if (val & SC_ENABLE_IP && !(is->pppcfg & SC_ENABLE_IP) &&
			    (is->state & IPPP_CONNECT)) {
				if (lp) {
					/* OK .. we are ready to send buffers */
					is->pppcfg = val; /* isdn_ppp_xmit test for SC_ENABLE_IP !!! */
					netif_wake_queue(lp->netdev->dev);
					break;
				}
			}
			is->pppcfg = val;
			break;
		case PPPIOCGIDLE:	/* get idle time information */
			if (lp) {
				struct ppp_idle pidle;
				pidle.xmit_idle = pidle.recv_idle = lp->huptimer;
				if ((r = set_arg(argp, &pidle,sizeof(struct ppp_idle))))
					return r;
			}
			break;
		case PPPIOCSMRU:	/* set receive unit size for PPP */
			if ((r = get_arg(argp, &val, sizeof(val) )))
				return r;
			is->mru = val;
			break;
		case PPPIOCSMPMRU:
			break;
		case PPPIOCSMPMTU:
			break;
		case PPPIOCSMAXCID:	/* set the maximum compression slot id */
			if ((r = get_arg(argp, &val, sizeof(val) )))
				return r;
			val++;	/* ioctl carries max id, slhc wants slot count */
			if (is->maxcid != val) {
#ifdef CONFIG_ISDN_PPP_VJ
				struct slcompress *sltmp;
#endif
				if (is->debug & 0x1)
					printk(KERN_DEBUG "ippp, ioctl: changed MAXCID to %ld\n", val);
				is->maxcid = val;
#ifdef CONFIG_ISDN_PPP_VJ
				sltmp = slhc_init(16, val);
				if (!sltmp) {
					printk(KERN_ERR "ippp, can't realloc slhc struct\n");
					return -ENOMEM;
				}
				if (is->slcomp)
					slhc_free(is->slcomp);
				is->slcomp = sltmp;
#endif
			}
			break;
		case PPPIOCGDEBUG:
			if ((r = set_arg(argp, &is->debug, sizeof(is->debug) )))
				return r;
			break;
		case PPPIOCSDEBUG:
			if ((r = get_arg(argp, &val, sizeof(val) )))
				return r;
			is->debug = val;
			break;
		case PPPIOCGCOMPRESSORS:
			{
				/* Report registered compressors as a 8*BITS_PER_LONG bitmap. */
				unsigned long protos[8] = {0,};
				struct isdn_ppp_compressor *ipc = ipc_head;
				while(ipc) {
					j = ipc->num / (sizeof(long)*8);
					i = ipc->num % (sizeof(long)*8);
					if(j < 8)
						protos[j] |= (0x1<<i);
					ipc = ipc->next;
				}
				if ((r = set_arg(argp,protos,8*sizeof(long) )))
					return r;
			}
			break;
		case PPPIOCSCOMPRESSOR:
			if ((r = get_arg(argp, &data, sizeof(struct isdn_ppp_comp_data))))
				return r;
			return isdn_ppp_set_compressor(is, &data);
		case PPPIOCGCALLINFO:
			{
				struct pppcallinfo pci;
				memset((char *) &pci,0,sizeof(struct pppcallinfo));
				if(lp)
				{
					strncpy(pci.local_num,lp->msn,63);
					if(lp->dial) {
						strncpy(pci.remote_num,lp->dial->num,63);
					}
					pci.charge_units = lp->charge;
					if(lp->outgoing)
						pci.calltype = CALLTYPE_OUTGOING;
					else
						pci.calltype = CALLTYPE_INCOMING;
					if(lp->flags & ISDN_NET_CALLBACK)
						pci.calltype |= CALLTYPE_CALLBACK;
				}
				return set_arg(argp,&pci,sizeof(struct pppcallinfo));
			}
#ifdef CONFIG_IPPP_FILTER
		case PPPIOCSPASS:
			{
				struct sock_filter *code;
				int len = get_filter(argp, &code);
				if (len < 0)
					return len;
				kfree(is->pass_filter);
				is->pass_filter = code;
				is->pass_len = len;
				break;
			}
		case PPPIOCSACTIVE:
			{
				struct sock_filter *code;
				int len = get_filter(argp, &code);
				if (len < 0)
					return len;
				kfree(is->active_filter);
				is->active_filter = code;
				is->active_len = len;
				break;
			}
#endif /* CONFIG_IPPP_FILTER */
		default:
			break;
	}
	return 0;
}

/*
 * poll handler for the ippp device: always writable; readable when the
 * receive ring holds data or IPPP_NOBLOCK forces a spurious wakeup.
 */
unsigned int
isdn_ppp_poll(struct file *file, poll_table * wait)
{
	u_int mask;
	struct ippp_buf_queue *bf, *bl;
	u_long flags;
	struct ippp_struct *is;

	is = file->private_data;

	if (is->debug & 0x2)
		printk(KERN_DEBUG "isdn_ppp_poll: minor: %d\n",
				iminor(file->f_path.dentry->d_inode));

	/* just registers wait_queue hook. This doesn't really wait. */
	poll_wait(file, &is->wq, wait);

	if (!(is->state & IPPP_OPEN)) {
		if(is->state == IPPP_CLOSEWAIT)
			return POLLHUP;
		printk(KERN_DEBUG "isdn_ppp: device not open\n");
		return POLLERR;
	}
	/* we're always ready to send .. */
	mask = POLLOUT | POLLWRNORM;

	spin_lock_irqsave(&is->buflock, flags);
	bl = is->last;
	bf = is->first;
	/*
	 * if IPPP_NOBLOCK is set we return even if we have nothing to read
	 */
	if (bf->next != bl || (is->state & IPPP_NOBLOCK)) {
		is->state &= ~IPPP_NOBLOCK;
		mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_irqrestore(&is->buflock, flags);
	return mask;
}

/*
 * fill up isdn_ppp_read() queue ..
*/ static int isdn_ppp_fill_rq(unsigned char *buf, int len, int proto, int slot) { struct ippp_buf_queue *bf, *bl; u_long flags; u_char *nbuf; struct ippp_struct *is; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_WARNING "ippp: illegal slot(%d).\n", slot); return 0; } is = ippp_table[slot]; if (!(is->state & IPPP_CONNECT)) { printk(KERN_DEBUG "ippp: device not activated.\n"); return 0; } nbuf = kmalloc(len + 4, GFP_ATOMIC); if (!nbuf) { printk(KERN_WARNING "ippp: Can't alloc buf\n"); return 0; } nbuf[0] = PPP_ALLSTATIONS; nbuf[1] = PPP_UI; nbuf[2] = proto >> 8; nbuf[3] = proto & 0xff; memcpy(nbuf + 4, buf, len); spin_lock_irqsave(&is->buflock, flags); bf = is->first; bl = is->last; if (bf == bl) { printk(KERN_WARNING "ippp: Queue is full; discarding first buffer\n"); bf = bf->next; kfree(bf->buf); is->first = bf; } bl->buf = (char *) nbuf; bl->len = len + 4; is->last = bl->next; spin_unlock_irqrestore(&is->buflock, flags); wake_up_interruptible(&is->wq); return len; } /* * read() .. non-blocking: ipppd calls it only after select() * reports, that there is data */ int isdn_ppp_read(int min, struct file *file, char __user *buf, int count) { struct ippp_struct *is; struct ippp_buf_queue *b; u_long flags; u_char *save_buf; is = file->private_data; if (!(is->state & IPPP_OPEN)) return 0; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; spin_lock_irqsave(&is->buflock, flags); b = is->first->next; save_buf = b->buf; if (!save_buf) { spin_unlock_irqrestore(&is->buflock, flags); return -EAGAIN; } if (b->len < count) count = b->len; b->buf = NULL; is->first = b; spin_unlock_irqrestore(&is->buflock, flags); if (copy_to_user(buf, save_buf, count)) count = -EFAULT; kfree(save_buf); return count; } /* * ipppd wanna write a packet to the card .. 
non-blocking */ int isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) { isdn_net_local *lp; struct ippp_struct *is; int proto; unsigned char protobuf[4]; is = file->private_data; if (!(is->state & IPPP_CONNECT)) return 0; lp = is->lp; /* -> push it directly to the lowlevel interface */ if (!lp) printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n"); else { /* * Don't reset huptimer for * LCP packets. (Echo requests). */ if (copy_from_user(protobuf, buf, 4)) return -EFAULT; proto = PPP_PROTOCOL(protobuf); if (proto != PPP_LCP) lp->huptimer = 0; if (lp->isdn_device < 0 || lp->isdn_channel < 0) return 0; if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) && lp->dialstate == 0 && (lp->flags & ISDN_NET_CONNECTED)) { unsigned short hl; struct sk_buff *skb; /* * we need to reserve enough space in front of * sk_buff. old call to dev_alloc_skb only reserved * 16 bytes, now we are looking what the driver want */ hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen; skb = alloc_skb(hl+count, GFP_ATOMIC); if (!skb) { printk(KERN_WARNING "isdn_ppp_write: out of memory!\n"); return count; } skb_reserve(skb, hl); if (copy_from_user(skb_put(skb, count), buf, count)) { kfree_skb(skb); return -EFAULT; } if (is->debug & 0x40) { printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len); isdn_ppp_frame_log("xmit", skb->data, skb->len, 32,is->unit,lp->ppp_slot); } isdn_ppp_send_ccp(lp->netdev,lp,skb); /* keeps CCP/compression states in sync */ isdn_net_write_super(lp, skb); } } return count; } /* * init memory, structures etc. 
 */
/*
 * Allocates and wires up the per-slot ippp_struct ring buffers.
 * Returns 0 on success, -ENOMEM / -1 on allocation failure (already
 * allocated slots are freed again).
 */
int
isdn_ppp_init(void)
{
	int i, j;

#ifdef CONFIG_ISDN_MPP
	if( isdn_ppp_mp_bundle_array_init() < 0 )
		return -ENOMEM;
#endif /* CONFIG_ISDN_MPP */

	for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
		if (!(ippp_table[i] = kzalloc(sizeof(struct ippp_struct), GFP_KERNEL))) {
			printk(KERN_WARNING "isdn_ppp_init: Could not alloc ippp_table\n");
			for (j = 0; j < i; j++)
				kfree(ippp_table[j]);
			return -1;
		}
		spin_lock_init(&ippp_table[i]->buflock);
		ippp_table[i]->state = 0;
		ippp_table[i]->first = ippp_table[i]->rq + NUM_RCV_BUFFS - 1;
		ippp_table[i]->last = ippp_table[i]->rq;

		/* link rq[] into a circular doubly-linked ring */
		for (j = 0; j < NUM_RCV_BUFFS; j++) {
			ippp_table[i]->rq[j].buf = NULL;
			ippp_table[i]->rq[j].last = ippp_table[i]->rq +
			    (NUM_RCV_BUFFS + j - 1) % NUM_RCV_BUFFS;
			ippp_table[i]->rq[j].next = ippp_table[i]->rq + (j + 1) % NUM_RCV_BUFFS;
		}
	}
	return 0;
}

/* Frees everything isdn_ppp_init() allocated. */
void
isdn_ppp_cleanup(void)
{
	int i;

	for (i = 0; i < ISDN_MAX_CHANNELS; i++)
		kfree(ippp_table[i]);

#ifdef CONFIG_ISDN_MPP
	kfree(isdn_ppp_bundle_arr);
#endif /* CONFIG_ISDN_MPP */

}

/*
 * check for address/control field and skip if allowed
 * retval != 0 -> discard packet silently
 */
static int isdn_ppp_skip_ac(struct ippp_struct *is, struct sk_buff *skb)
{
	if (skb->len < 1)
		return -1;

	if (skb->data[0] == 0xff) {
		if (skb->len < 2)
			return -1;

		if (skb->data[1] != 0x03)
			return -1;

		// skip address/control (AC) field
		skb_pull(skb, 2);
	} else {
		if (is->pppcfg & SC_REJ_COMP_AC)
			// if AC compression was not negotiated, but used, discard packet
			return -1;
	}
	return 0;
}

/*
 * get the PPP protocol header and pull skb
 * retval < 0 -> discard packet silently
 */
static int isdn_ppp_strip_proto(struct sk_buff *skb)
{
	int proto;

	if (skb->len < 1)
		return -1;

	if (skb->data[0] & 0x1) {
		// protocol field is compressed (single odd byte)
		proto = skb->data[0];
		skb_pull(skb, 1);
	} else {
		if (skb->len < 2)
			return -1;
		proto = ((int) skb->data[0] << 8) + skb->data[1];
		skb_pull(skb, 2);
	}
	return proto;
}

/*
 * handler for incoming packets on a syncPPP interface
 * Strips AC/protocol headers, runs optional link decompression and MP
 * demultiplexing, then hands the frame to isdn_ppp_push_higher().
 * Takes ownership of skb in all paths.
 */
void isdn_ppp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff *skb)
{
	struct ippp_struct *is;
	int slot;
	int proto;

	BUG_ON(net_dev->local->master); // we're called with the master device always

	slot = lp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_receive: lp->ppp_slot(%d)\n",
			lp->ppp_slot);
		kfree_skb(skb);
		return;
	}
	is = ippp_table[slot];

	if (is->debug & 0x4) {
		printk(KERN_DEBUG "ippp_receive: is:%08lx lp:%08lx slot:%d unit:%d len:%d\n",
		       (long)is,(long)lp,lp->ppp_slot,is->unit,(int) skb->len);
		isdn_ppp_frame_log("receive", skb->data, skb->len, 32,is->unit,lp->ppp_slot);
	}

	if (isdn_ppp_skip_ac(is, skb) < 0) {
		kfree_skb(skb);
		return;
	}
	proto = isdn_ppp_strip_proto(skb);
	if (proto < 0) {
		kfree_skb(skb);
		return;
	}

#ifdef CONFIG_ISDN_MPP
	if (is->compflags & SC_LINK_DECOMP_ON) {
		skb = isdn_ppp_decompress(skb, is, NULL, &proto);
		if (!skb) // decompression error
			return;
	}

	if (!(is->mpppcfg & SC_REJ_MP_PROT)) { // we agreed to receive MPPP
		if (proto == PPP_MP) {
			isdn_ppp_mp_receive(net_dev, lp, skb);
			return;
		}
	}
#endif
	isdn_ppp_push_higher(net_dev, lp, skb, proto);
}

/*
 * we receive a reassembled frame, MPPP has been taken care of before.
 * address/control and protocol have been stripped from the skb
 * note: net_dev has to be master net_dev
 *
 * IP/IPX frames go up the network stack; VJ-compressed TCP is expanded;
 * CCP and everything unknown is queued for ipppd. Takes ownership of skb.
 */
static void isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff *skb, int proto)
{
	struct net_device *dev = net_dev->dev;
	struct ippp_struct *is, *mis;
	isdn_net_local *mlp = NULL;
	int slot;

	slot = lp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_push_higher: lp->ppp_slot(%d)\n",
			lp->ppp_slot);
		goto drop_packet;
	}
	is = ippp_table[slot];

	if (lp->master) { // FIXME?
		mlp = ISDN_MASTER_PRIV(lp);
		slot = mlp->ppp_slot;
		if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
			printk(KERN_ERR "isdn_ppp_push_higher: master->ppp_slot(%d)\n",
				lp->ppp_slot);
			goto drop_packet;
		}
	}
	mis = ippp_table[slot];

	if (is->debug & 0x10) {
		printk(KERN_DEBUG "push, skb %d %04x\n", (int) skb->len, proto);
		isdn_ppp_frame_log("rpush", skb->data, skb->len, 32,is->unit,lp->ppp_slot);
	}
	if (mis->compflags & SC_DECOMP_ON) {
		/* bundle-level decompression uses the master's state */
		skb = isdn_ppp_decompress(skb, is, mis, &proto);
		if (!skb) // decompression error
			return;
	}
	switch (proto) {
		case PPP_IPX: /* untested */
			if (is->debug & 0x20)
				printk(KERN_DEBUG "isdn_ppp: IPX\n");
			skb->protocol = htons(ETH_P_IPX);
			break;
		case PPP_IP:
			if (is->debug & 0x20)
				printk(KERN_DEBUG "isdn_ppp: IP\n");
			skb->protocol = htons(ETH_P_IP);
			break;
		case PPP_COMP:
		case PPP_COMPFRAG:
			printk(KERN_INFO "isdn_ppp: unexpected compressed frame dropped\n");
			goto drop_packet;
#ifdef CONFIG_ISDN_PPP_VJ
		case PPP_VJC_UNCOMP:
			if (is->debug & 0x20)
				printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n");
			if (net_dev->local->ppp_slot < 0) {
				printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
					__func__, net_dev->local->ppp_slot);
				goto drop_packet;
			}
			if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp,
					skb->data, skb->len) <= 0) {
				printk(KERN_WARNING "isdn_ppp: received illegal VJC_UNCOMP frame!\n");
				goto drop_packet;
			}
			skb->protocol = htons(ETH_P_IP);
			break;
		case PPP_VJC_COMP:
			if (is->debug & 0x20)
				printk(KERN_DEBUG "isdn_ppp: VJC_COMP\n");
			{
				struct sk_buff *skb_old = skb;
				int pkt_len;
				/* decompression can grow the packet; 128 bytes headroom */
				skb = dev_alloc_skb(skb_old->len + 128);

				if (!skb) {
					printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
					skb = skb_old;
					goto drop_packet;
				}
				skb_put(skb, skb_old->len + 128);
				skb_copy_from_linear_data(skb_old, skb->data,
							  skb_old->len);
				if (net_dev->local->ppp_slot < 0) {
					printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
						__func__, net_dev->local->ppp_slot);
					goto drop_packet;
				}
				pkt_len = slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp,
						skb->data, skb_old->len);
				kfree_skb(skb_old);
				if (pkt_len < 0)
					goto drop_packet;

				skb_trim(skb, pkt_len);
				skb->protocol = htons(ETH_P_IP);
			}
			break;
#endif
		case PPP_CCP:
		case PPP_CCPFRAG:
			isdn_ppp_receive_ccp(net_dev,lp,skb,proto);
			/* Dont pop up ResetReq/Ack stuff to the daemon any
			   longer - the job is done already */
			if(skb->data[0] == CCP_RESETREQ ||
			   skb->data[0] == CCP_RESETACK)
				break;
			/* fall through */
		default:
			isdn_ppp_fill_rq(skb->data, skb->len, proto, lp->ppp_slot);	/* push data to pppd device */
			kfree_skb(skb);
			return;
	}

#ifdef CONFIG_IPPP_FILTER
	/* check if the packet passes the pass and active filters
	 * the filter instructions are constructed assuming
	 * a four-byte PPP header on each packet (which is still present) */
	skb_push(skb, 4);

	{
		u_int16_t *p = (u_int16_t *) skb->data;

		*p = 0;	/* indicate inbound */
	}

	if (is->pass_filter
	    && sk_run_filter(skb, is->pass_filter) == 0) {
		if (is->debug & 0x2)
			printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
		kfree_skb(skb);
		return;
	}
	if (!(is->active_filter
	      && sk_run_filter(skb, is->active_filter) == 0)) {
		if (is->debug & 0x2)
			printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
		lp->huptimer = 0;
		if (mlp)
			mlp->huptimer = 0;
	}
	skb_pull(skb, 4);
#else /* CONFIG_IPPP_FILTER */
	lp->huptimer = 0;
	if (mlp)
		mlp->huptimer = 0;
#endif /* CONFIG_IPPP_FILTER */

	skb->dev = dev;
	skb_reset_mac_header(skb);
	netif_rx(skb);
	/* net_dev->local->stats.rx_packets++; done in isdn_net.c */
	return;

 drop_packet:
	net_dev->local->stats.rx_dropped++;
	kfree_skb(skb);
}

/*
 * isdn_ppp_skb_push ..
 * checks whether we have enough space at the beginning of the skb
 * and allocs a new SKB if necessary
 */
/*
 * Ensure at least 'len' bytes of headroom on *skb_p, then push 'len'
 * bytes and return a pointer to them.  On headroom-realloc failure the
 * original skb is freed and NULL is returned, so the caller must treat
 * *skb_p as gone and bail out (callers below 'goto unlock').
 */
static unsigned char *isdn_ppp_skb_push(struct sk_buff **skb_p,int len)
{
	struct sk_buff *skb = *skb_p;

	if(skb_headroom(skb) < len) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, len);

		if (!nskb) {
			printk(KERN_ERR "isdn_ppp_skb_push: can't realloc headroom!\n");
			dev_kfree_skb(skb);
			return NULL;
		}
		printk(KERN_DEBUG "isdn_ppp_skb_push:under %d %d\n",skb_headroom(skb),len);
		/* the copy succeeded; the original skb is no longer needed */
		dev_kfree_skb(skb);
		*skb_p = nskb;
		return skb_push(nskb, len);
	}
	return skb_push(skb,len);
}

/*
 * send ppp frame .. we expect a PIDCOMPressable proto --
 *  (here: currently always PPP_IP,PPP_VJC_COMP,PPP_VJC_UNCOMP)
 *
 * VJ compression may change skb pointer!!! .. requeue with old
 * skb isn't allowed!!
 */
/*
 * Transmit path: VJ-compress (optional), CCP-compress (optional), add
 * MP fragment header (optional), then protocol/address-control fields,
 * and hand the frame to the locked channel.  Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY (the latter asks the stack to requeue the skb).
 */
int
isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	isdn_net_local *lp,*mlp;
	isdn_net_dev *nd;
	unsigned int proto = PPP_IP;	/* 0x21 */
	struct ippp_struct *ipt,*ipts;
	int slot, retval = NETDEV_TX_OK;

	mlp = netdev_priv(netdev);
	nd = mlp->netdev;	/* get master lp */

	slot = mlp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
			mlp->ppp_slot);
		kfree_skb(skb);
		goto out;
	}
	ipts = ippp_table[slot];

	if (!(ipts->pppcfg & SC_ENABLE_IP)) {	/* PPP connected ? */
		if (ipts->debug & 0x1)
			printk(KERN_INFO "%s: IP frame delayed.\n", netdev->name);
		retval = NETDEV_TX_BUSY;
		goto out;
	}

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		proto = PPP_IP;
		break;
	case ETH_P_IPX:
		proto = PPP_IPX;	/* untested */
		break;
	default:
		/* NOTE(review): skb->protocol printed without ntohs() here —
		 * value appears byte-swapped on little-endian; verify intent */
		printk(KERN_ERR "isdn_ppp: skipped unsupported protocol: %#x.\n",
		       skb->protocol);
		dev_kfree_skb(skb);
		goto out;
	}

	lp = isdn_net_get_locked_lp(nd);
	if (!lp) {
		printk(KERN_WARNING "%s: all channels busy - requeuing!\n", netdev->name);
		retval = NETDEV_TX_BUSY;
		goto out;
	}
	/* we have our lp locked from now on */

	slot = lp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "isdn_ppp_xmit: lp->ppp_slot(%d)\n",
			lp->ppp_slot);
		kfree_skb(skb);
		goto unlock;
	}
	ipt = ippp_table[slot];

	/*
	 * after this line .. requeueing in the device queue is no
	 * longer allowed!!!
	 */

	/* Pull off the fake header we stuck on earlier to keep
	 * the fragmentation code happy.
	 */
	skb_pull(skb,IPPP_MAX_HEADER);

#ifdef CONFIG_IPPP_FILTER
	/* check if we should pass this packet
	 * the filter instructions are constructed assuming
	 * a four-byte PPP header on each packet */
	*skb_push(skb, 4) = 1; /* indicate outbound */

	{
		/* store the PPP protocol in bytes 2-3 of the fake header */
		__be16 *p = (__be16 *)skb->data;

		p++;
		*p = htons(proto);
	}

	if (ipt->pass_filter
	    && sk_run_filter(skb, ipt->pass_filter) == 0) {
		if (ipt->debug & 0x4)
			printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
		kfree_skb(skb);
		goto unlock;
	}
	if (!(ipt->active_filter
	      && sk_run_filter(skb, ipt->active_filter) == 0)) {
		if (ipt->debug & 0x4)
			printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
		lp->huptimer = 0;
	}
	skb_pull(skb, 4);
#else /* CONFIG_IPPP_FILTER */
	lp->huptimer = 0;
#endif /* CONFIG_IPPP_FILTER */

	if (ipt->debug & 0x4)
		printk(KERN_DEBUG "xmit skb, len %d\n", (int) skb->len);
	if (ipts->debug & 0x40)
		isdn_ppp_frame_log("xmit0", skb->data, skb->len, 32,ipts->unit,lp->ppp_slot);

#ifdef CONFIG_ISDN_PPP_VJ
	if (proto == PPP_IP && ipts->pppcfg & SC_COMP_TCP) {	/* ipts here? probably yes, but check this again */
		struct sk_buff *new_skb;
		unsigned short hl;
		/*
		 * we need to reserve enough space in front of
		 * sk_buff. old call to dev_alloc_skb only reserved
		 * 16 bytes, now we are looking what the driver want.
		 */
		hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen + IPPP_MAX_HEADER;
		/*
		 * Note: hl might still be insufficient because the method
		 * above does not account for a possibible MPPP slave channel
		 * which had larger HL header space requirements than the
		 * master.
		 */
		new_skb = alloc_skb(hl+skb->len, GFP_ATOMIC);
		if (new_skb) {
			u_char *buf;
			int pktlen;

			skb_reserve(new_skb, hl);
			new_skb->dev = skb->dev;
			skb_put(new_skb, skb->len);
			buf = skb->data;

			pktlen = slhc_compress(ipts->slcomp, skb->data, skb->len, new_skb->data,
				 &buf, !(ipts->pppcfg & SC_NO_TCP_CCID));

			/* slhc_compress advances 'buf' into new_skb->data only
			 * when it actually produced a compressed copy */
			if (buf != skb->data) {
				if (new_skb->data != buf)
					printk(KERN_ERR "isdn_ppp: FATAL error after slhc_compress!!\n");
				dev_kfree_skb(skb);
				skb = new_skb;
			} else {
				dev_kfree_skb(new_skb);
			}

			skb_trim(skb, pktlen);
			if (skb->data[0] & SL_TYPE_COMPRESSED_TCP) {	/* cslip? style -> PPP */
				proto = PPP_VJC_COMP;
				skb->data[0] ^= SL_TYPE_COMPRESSED_TCP;
			} else {
				if (skb->data[0] >= SL_TYPE_UNCOMPRESSED_TCP)
					proto = PPP_VJC_UNCOMP;
				skb->data[0] = (skb->data[0] & 0x0f) | 0x40;
			}
		}
	}
#endif

	/*
	 * normal (single link) or bundle compression
	 */
	if(ipts->compflags & SC_COMP_ON) {
		/* We send compressed only if both down- und upstream
		   compression is negotiated, that means, CCP is up */
		if(ipts->compflags & SC_DECOMP_ON) {
			skb = isdn_ppp_compress(skb,&proto,ipt,ipts,0);
		} else {
			printk(KERN_DEBUG "isdn_ppp: CCP not yet up - sending as-is\n");
		}
	}

	if (ipt->debug & 0x24)
		printk(KERN_DEBUG "xmit2 skb, len %d, proto %04x\n", (int) skb->len, proto);

#ifdef CONFIG_ISDN_MPP
	if (ipt->mpppcfg & SC_MP_PROT) {
		/* we get mp_seqno from static isdn_net_local */
		long mp_seqno = ipts->mp_seqno;
		ipts->mp_seqno++;
		if (ipt->mpppcfg & SC_OUT_SHORT_SEQ) {
			/* 12-bit short sequence format: 3-byte MP header */
			unsigned char *data = isdn_ppp_skb_push(&skb, 3);
			if(!data)
				goto unlock;
			mp_seqno &= 0xfff;
			data[0] = MP_BEGIN_FRAG | MP_END_FRAG | ((mp_seqno >> 8) & 0xf);	/* (B)egin & (E)ndbit .. */
			data[1] = mp_seqno & 0xff;
			data[2] = proto;	/* PID compression */
		} else {
			/* 24-bit long sequence format: 5-byte MP header */
			unsigned char *data = isdn_ppp_skb_push(&skb, 5);
			if(!data)
				goto unlock;
			data[0] = MP_BEGIN_FRAG | MP_END_FRAG;	/* (B)egin & (E)ndbit .. */
			data[1] = (mp_seqno >> 16) & 0xff;	/* sequence number: 24bit */
			data[2] = (mp_seqno >> 8) & 0xff;
			data[3] = (mp_seqno >> 0) & 0xff;
			data[4] = proto;	/* PID compression */
		}
		proto = PPP_MP; /* MP Protocol, 0x003d */
	}
#endif

	/*
	 * 'link in bundle' compression  ...
	 */
	if(ipt->compflags & SC_LINK_COMP_ON)
		skb = isdn_ppp_compress(skb,&proto,ipt,ipts,1);

	/* prepend the PPP protocol field, one byte if PFC was negotiated
	 * and the value fits, else two bytes */
	if( (ipt->pppcfg & SC_COMP_PROT) && (proto <= 0xff) ) {
		unsigned char *data = isdn_ppp_skb_push(&skb,1);
		if(!data)
			goto unlock;
		data[0] = proto & 0xff;
	}
	else {
		unsigned char *data = isdn_ppp_skb_push(&skb,2);
		if(!data)
			goto unlock;
		data[0] = (proto >> 8) & 0xff;
		data[1] = proto & 0xff;
	}
	/* address/control field unless ACFC was negotiated */
	if(!(ipt->pppcfg & SC_COMP_AC)) {
		unsigned char *data = isdn_ppp_skb_push(&skb,2);
		if(!data)
			goto unlock;
		data[0] = 0xff;    /* All Stations */
		data[1] = 0x03;    /* Unnumbered information */
	}

	/* tx-stats are now updated via BSENT-callback */

	if (ipts->debug & 0x40) {
		printk(KERN_DEBUG "skb xmit: len: %d\n", (int) skb->len);
		isdn_ppp_frame_log("xmit", skb->data, skb->len, 32,ipt->unit,lp->ppp_slot);
	}

	isdn_net_writebuf_skb(lp, skb);

 unlock:
	spin_unlock_bh(&lp->xmit_lock);
 out:
	return retval;
}

#ifdef CONFIG_IPPP_FILTER
/*
 * check if this packet may trigger auto-dial.
 */
/* Returns nonzero when the socket filters say the frame must NOT dial. */
int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
{
	struct ippp_struct *is = ippp_table[lp->ppp_slot];
	u_int16_t proto;
	int drop = 0;

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		proto = PPP_IP;
		break;
	case ETH_P_IPX:
		proto = PPP_IPX;
		break;
	default:
		printk(KERN_ERR "isdn_ppp_autodial_filter: unsupported protocol 0x%x.\n",
		       skb->protocol);
		return 1;
	}

	/* the filter instructions are constructed assuming
	 * a four-byte PPP header on each packet. we have to
	 * temporarily remove part of the fake header stuck on
	 * earlier.
	 */
	*skb_pull(skb, IPPP_MAX_HEADER - 4) = 1; /* indicate outbound */

	{
		/* store the PPP protocol in bytes 2-3 of the fake header */
		__be16 *p = (__be16 *)skb->data;

		p++;
		*p = htons(proto);
	}

	drop |= is->pass_filter
	        && sk_run_filter(skb, is->pass_filter) == 0;
	drop |= is->active_filter
	        && sk_run_filter(skb, is->active_filter) == 0;

	/* restore the fake header before returning the skb to the caller */
	skb_push(skb, IPPP_MAX_HEADER - 4);
	return drop;
}
#endif
#ifdef CONFIG_ISDN_MPP

/* this is _not_ rfc1990 header, but something we convert both short and long
 * headers to for convinience's sake:
 *	byte 0 is flags as in rfc1990
 *	bytes 1...4 is 24-bit seqence number converted to host byte order
 */
#define MP_HEADER_LEN	5

#define MP_LONGSEQ_MASK		0x00ffffff
#define MP_SHORTSEQ_MASK	0x00000fff
#define MP_LONGSEQ_MAX		MP_LONGSEQ_MASK
#define MP_SHORTSEQ_MAX		MP_SHORTSEQ_MASK
#define MP_LONGSEQ_MAXBIT	((MP_LONGSEQ_MASK+1)>>1)
#define MP_SHORTSEQ_MAXBIT	((MP_SHORTSEQ_MASK+1)>>1)

/* sequence-wrap safe comparisons (for long sequence)*/
#define MP_LT(a,b)	((a-b)&MP_LONGSEQ_MAXBIT)
#define MP_LE(a,b)	!((b-a)&MP_LONGSEQ_MAXBIT)
#define MP_GT(a,b)	((b-a)&MP_LONGSEQ_MAXBIT)
#define MP_GE(a,b)	!((a-b)&MP_LONGSEQ_MAXBIT)

/* accessors for the converted 5-byte header described above */
#define MP_SEQ(f)	((*(u32*)(f->data+1)))
#define MP_FLAGS(f)	(f->data[0])

/* One-time allocation of the per-channel bundle array (with lock init). */
static int isdn_ppp_mp_bundle_array_init(void)
{
	int i;
	int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
	if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
		return -ENOMEM;
	for( i = 0; i < ISDN_MAX_CHANNELS; i++ )
		spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
	return 0;
}

/* Find an unused bundle slot (ref_ct <= 0), or NULL if all are taken. */
static ippp_bundle * isdn_ppp_mp_bundle_alloc(void)
{
	int i;
	for( i = 0; i < ISDN_MAX_CHANNELS; i++ )
		if (isdn_ppp_bundle_arr[i].ref_ct <= 0)
			return (isdn_ppp_bundle_arr + i);
	return NULL;
}

/*
 * Attach 'lp' to an existing bundle ('add_to') or allocate a fresh one
 * for the first link.  Returns 0 or a negative errno.
 */
static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
{
	struct ippp_struct * is;

	if (lp->ppp_slot < 0) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
			__func__, lp->ppp_slot);
		return(-EINVAL);
	}

	is = ippp_table[lp->ppp_slot];
	if (add_to) {
		/* moving to another bundle: drop the old reference first */
		if( lp->netdev->pb )
			lp->netdev->pb->ref_ct--;
		lp->netdev->pb = add_to;
	} else {		/* first link in a bundle */
		is->mp_seqno = 0;
		if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
			return -ENOMEM;
		lp->next = lp->last = lp;	/* nobody else in a queue */
		lp->netdev->pb->frags = NULL;
		lp->netdev->pb->frames = 0;
		/* UINT_MAX marks "no frame processed yet"; see the
		 * sequence-start check in isdn_ppp_mp_receive() */
		lp->netdev->pb->seq = UINT_MAX;
	}
	lp->netdev->pb->ref_ct++;

	is->last_link_seqno = 0;
	return 0;
}

static u32 isdn_ppp_mp_get_seq( int short_seq,
					struct sk_buff * skb, u32 last_seq );
static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
			struct sk_buff * from, struct sk_buff * to );
static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
				struct sk_buff * from, struct sk_buff * to );
static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb );
static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );

/*
 * Receive one MP fragment: queue it in sequence order on the bundle's
 * fragment list and reassemble/dispatch any complete sequences.  Runs
 * under the bundle spinlock with interrupts disabled (note mp ==
 * net_dev->pb, so locking &net_dev->pb->lock and unlocking &mp->lock
 * operate on the same spinlock).
 */
static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
							struct sk_buff *skb)
{
	struct ippp_struct *is;
	isdn_net_local * lpq;
	ippp_bundle * mp;
	isdn_mppp_stats * stats;
	struct sk_buff * newfrag, * frag, * start, *nextf;
	u32 newseq, minseq, thisseq;
	unsigned long flags;
	int slot;

	spin_lock_irqsave(&net_dev->pb->lock, flags);
	mp = net_dev->pb;
	stats = &mp->stats;
	slot = lp->ppp_slot;
	if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
			__func__, lp->ppp_slot);
		stats->frame_drops++;
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&mp->lock, flags);
		return;
	}
	is = ippp_table[slot];
	if( ++mp->frames > stats->max_queue_len )
		stats->max_queue_len = mp->frames;

	if (is->debug & 0x8)
		isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);

	newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
						skb, is->last_link_seqno);

	/* if this packet seq # is less than last already processed one,
	 * toss it right away, but check for sequence start case first
	 */
	if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) {
		mp->seq = newseq;	/* the first packet: required for
					 * rfc1990 non-compliant clients --
					 * prevents constant packet toss */
	} else if( MP_LT(newseq,
			 mp->seq) ) {
		/* stale fragment: already behind the processed sequence */
		stats->frame_drops++;
		isdn_ppp_mp_free_skb(mp, skb);
		spin_unlock_irqrestore(&mp->lock, flags);
		return;
	}

	/* find the minimum received sequence number over all links */
	is->last_link_seqno = minseq = newseq;
	for (lpq = net_dev->queue;;) {
		slot = lpq->ppp_slot;
		if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
			printk(KERN_ERR "%s: lpq->ppp_slot(%d)\n",
				__func__, lpq->ppp_slot);
		} else {
			u32 lls = ippp_table[slot]->last_link_seqno;
			if (MP_LT(lls, minseq))
				minseq = lls;
		}
		if ((lpq = lpq->next) == net_dev->queue)
			break;
	}
	if (MP_LT(minseq, mp->seq))
		minseq = mp->seq;	/* can't go beyond already processed
					 * packets */

	newfrag = skb;

	/* if this new fragment is before the first one, then enqueue it now. */
	if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
		newfrag->next = frag;
		mp->frags = frag = newfrag;
		newfrag = NULL;
	}

	start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
				MP_SEQ(frag) == mp->seq ? frag : NULL;

	/*
	 * main fragment traversing loop
	 *
	 * try to accomplish several tasks:
	 * - insert new fragment into the proper sequence slot (once that's done
	 *   newfrag will be set to NULL)
	 * - reassemble any complete fragment sequence (non-null 'start'
	 *   indicates there is a contiguous sequence present)
	 * - discard any incomplete sequences that are below minseq -- due
	 *   to the fact that sender always increment sequence number, if there
	 *   is an incomplete sequence below minseq, no new fragments would
	 *   come to complete such sequence and it should be discarded
	 *
	 * loop completes when we accomplished the following tasks:
	 * - new fragment is inserted in the proper sequence ('newfrag' is
	 *   set to NULL)
	 * - we hit a gap in the sequence, so no reassembly/processing is
	 *   possible ('start' would be set to NULL)
	 *
	 * algorithm for this code is derived from code in the book
	 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
	 */
	while (start != NULL || newfrag != NULL) {

		thisseq = MP_SEQ(frag);
		nextf = frag->next;

		/* drop any duplicate fragments */
		if (newfrag != NULL && thisseq == newseq) {
			isdn_ppp_mp_free_skb(mp, newfrag);
			newfrag = NULL;
		}

		/* insert new fragment before next element if possible. */
		if (newfrag != NULL && (nextf == NULL ||
						MP_LT(newseq, MP_SEQ(nextf)))) {
			newfrag->next = nextf;
			frag->next = nextf = newfrag;
			newfrag = NULL;
		}

		if (start != NULL) {
			/* check for misplaced start */
			if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
				printk(KERN_WARNING"isdn_mppp(seq %d): new "
				      "BEGIN flag with no prior END", thisseq);
				stats->seqerrs++;
				stats->frame_drops++;
				start = isdn_ppp_mp_discard(mp, start,frag);
				nextf = frag->next;
			}
		} else if (MP_LE(thisseq, minseq)) {
			if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
				start = frag;
			else {
				/* incomplete sequence below the low watermark:
				 * drop this fragment, it can never complete */
				if (MP_FLAGS(frag) & MP_END_FRAG)
					stats->frame_drops++;
				if( mp->frags == frag )
					mp->frags = nextf;
				isdn_ppp_mp_free_skb(mp, frag);
				frag = nextf;
				continue;
			}
		}

		/* if start is non-null and we have end fragment, then
		 * we have full reassembly sequence -- reassemble
		 * and process packet now
		 */
		if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
			minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
			/* Reassemble the packet then dispatch it */
			isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);

			start = NULL;
			frag = NULL;

			mp->frags = nextf;
		}

		/* check if need to update start pointer: if we just
		 * reassembled the packet and sequence is contiguous
		 * then next fragment should be the start of new reassembly
		 * if sequence is contiguous, but we haven't reassembled yet,
		 * keep going.
		 * if sequence is not contiguous, either clear everything
		 * below low watermark and set start to the next frag or
		 * clear start ptr.
		 */
		if (nextf != NULL &&
		    ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
			/* if we just reassembled and the next one is here,
			 * then start another reassembly.
			 */
			if (frag == NULL) {
				if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
					start = nextf;
				else {
					printk(KERN_WARNING"isdn_mppp(seq %d):"
						" END flag with no following "
						"BEGIN", thisseq);
					stats->seqerrs++;
				}
			}
		} else {
			if ( nextf != NULL && frag != NULL &&
						MP_LT(thisseq, minseq)) {
				/* we've got a break in the sequence
				 * and we not at the end yet
				 * and we did not just reassembled
				 *(if we did, there wouldn't be anything before)
				 * and we below the low watermark
				 * discard all the frames below low watermark
				 * and start over */
				stats->frame_drops++;
				mp->frags = isdn_ppp_mp_discard(mp,start,nextf);
			}
			/* break in the sequence, no reassembly */
			start = NULL;
		}

		frag = nextf;
	}	/* while -- main loop */

	if (mp->frags == NULL)
		mp->frags = frag;

	/* rather straighforward way to deal with (not very) possible
	 * queue overflow */
	if (mp->frames > MP_MAX_QUEUE_LEN) {
		stats->overflows++;
		while (mp->frames > MP_MAX_QUEUE_LEN) {
			frag = mp->frags->next;
			isdn_ppp_mp_free_skb(mp, mp->frags);
			mp->frags = frag;
		}
	}
	spin_unlock_irqrestore(&mp->lock, flags);
}

/* Free every queued fragment of the bundle attached to 'lp'. */
static void isdn_ppp_mp_cleanup( isdn_net_local * lp )
{
	struct sk_buff * frag = lp->netdev->pb->frags;
	struct sk_buff * nextfrag;
	while( frag ) {
		nextfrag = frag->next;
		isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
		frag = nextfrag;
	}
	lp->netdev->pb->frags = NULL;
}

/*
 * Extract the MP sequence number from the wire header and rewrite the
 * skb head into the driver's canonical 5-byte form (flags byte plus a
 * host-order 24-bit sequence).  Short 12-bit sequences are widened to
 * 24 bits using 'last_seq' to detect wrap.
 */
static u32 isdn_ppp_mp_get_seq( int short_seq,
					struct sk_buff * skb, u32 last_seq )
{
	u32 seq;
	int flags = skb->data[0] & (MP_BEGIN_FRAG | MP_END_FRAG);

	if( !short_seq )
	{
		seq = ntohl(*(__be32 *)skb->data) & MP_LONGSEQ_MASK;
		skb_push(skb,1);
	}
	else
	{
		/* convert 12-bit short seq number to 24-bit long one
		*/
		seq = ntohs(*(__be16 *)skb->data) & MP_SHORTSEQ_MASK;

		/* check for seqence wrap */
		if( !(seq &  MP_SHORTSEQ_MAXBIT) &&
		     (last_seq &  MP_SHORTSEQ_MAXBIT) &&
		     (unsigned long)last_seq <= MP_LONGSEQ_MAX )
			seq |= (last_seq + MP_SHORTSEQ_MAX+1) &
					(~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK);
		else
			seq |= last_seq & (~MP_SHORTSEQ_MASK & MP_LONGSEQ_MASK);

		skb_push(skb, 3);	/* put converted seqence back in skb
					 */
	}
	*(u32*)(skb->data+1) = seq; 	/* put seqence back in _host_ byte
					 * order */
	skb->data[0] = flags;	        /* restore flags */
	return seq;
}

/* Free fragments on the list starting at 'from' up to (excluding) 'to'. */
struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
			struct sk_buff * from, struct sk_buff * to )
{
	if( from )
		while (from != to) {
			struct sk_buff * next = from->next;
			isdn_ppp_mp_free_skb(mp, from);
			from = next;
		}
	return from;
}

/*
 * Merge the fragments [from, to) into one packet (reusing 'from' when
 * it is a complete single-fragment frame), strip the protocol field and
 * push the result up the stack.  Frees the consumed fragments.
 */
void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
				struct sk_buff * from, struct sk_buff * to )
{
	ippp_bundle * mp = net_dev->pb;
	int proto;
	struct sk_buff * skb;
	unsigned int tot_len;

	if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
		printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
			__func__, lp->ppp_slot);
		return;
	}
	if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
		/* single-fragment frame: no copy needed */
		if( ippp_table[lp->ppp_slot]->debug & 0x40 )
			printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
					"len %d\n", MP_SEQ(from), from->len );
		skb = from;
		skb_pull(skb, MP_HEADER_LEN);
		mp->frames--;
	} else {
		struct sk_buff * frag;
		int n;

		for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++)
			tot_len += frag->len - MP_HEADER_LEN;

		if( ippp_table[lp->ppp_slot]->debug & 0x40 )
			printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
				"to %d, len %d\n", MP_SEQ(from),
				(MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len );
		if( (skb = dev_alloc_skb(tot_len)) == NULL ) {
			printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
					"of size %d\n", tot_len);
			isdn_ppp_mp_discard(mp, from, to);
			return;
		}

		/* copy payloads (minus MP headers) and free the fragments */
		while( from != to ) {
			unsigned int len = from->len - MP_HEADER_LEN;

			skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
							 skb_put(skb,len),
							 len);
			frag = from->next;
			isdn_ppp_mp_free_skb(mp, from);
			from = frag;
		}
	}
   	proto = isdn_ppp_strip_proto(skb);
	isdn_ppp_push_higher(net_dev, lp, skb, proto);
}

/* Free one queued fragment and account for it in the bundle. */
static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb)
{
	dev_kfree_skb(skb);
	mp->frames--;
}

/* Debug helper: dump the first six bytes of a received MP fragment. */
static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb )
{
	printk(KERN_DEBUG "mp_recv: %d/%d -> %02x %02x %02x %02x %02x %02x\n",
		slot, (int) skb->len,
		(int) skb->data[0], (int) skb->data[1], (int) skb->data[2],
		(int) skb->data[3], (int) skb->data[4], (int) skb->data[5]);
}

/*
 * Bind the link behind 'is' into the bundle of interface ippp<unit>,
 * copying the relevant ppp/mp configuration bits from the master link.
 */
static int isdn_ppp_bundle(struct ippp_struct *is, int unit)
{
	char ifn[IFNAMSIZ + 1];
	isdn_net_dev *p;
	isdn_net_local *lp, *nlp;
	int rc;
	unsigned long flags;

	sprintf(ifn, "ippp%d", unit);
	p = isdn_net_findif(ifn);
	if (!p) {
		printk(KERN_ERR "ippp_bundle: cannot find %s\n", ifn);
		return -EINVAL;
	}

	spin_lock_irqsave(&p->pb->lock, flags);

	nlp = is->lp;
	lp = p->queue;
	if( nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ||
		lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS ) {
		printk(KERN_ERR "ippp_bundle: binding to invalid slot %d\n",
			nlp->ppp_slot < 0 || nlp->ppp_slot >= ISDN_MAX_CHANNELS ?
			nlp->ppp_slot : lp->ppp_slot );
		rc = -EINVAL;
		goto out;
	}

	isdn_net_add_to_bundle(p, nlp);

	ippp_table[nlp->ppp_slot]->unit = ippp_table[lp->ppp_slot]->unit;

	/* maybe also SC_CCP stuff */
	ippp_table[nlp->ppp_slot]->pppcfg |= ippp_table[lp->ppp_slot]->pppcfg &
		(SC_ENABLE_IP | SC_NO_TCP_CCID | SC_REJ_COMP_TCP);
	ippp_table[nlp->ppp_slot]->mpppcfg |= ippp_table[lp->ppp_slot]->mpppcfg &
		(SC_MP_PROT | SC_REJ_MP_PROT | SC_OUT_SHORT_SEQ | SC_IN_SHORT_SEQ);
	rc = isdn_ppp_mp_init(nlp, p->pb);
out:
	spin_unlock_irqrestore(&p->pb->lock, flags);
	return rc;
}

#endif /* CONFIG_ISDN_MPP */

/*
 * network device ioctl handlers
 */

/* Copy PPP (and optionally VJ) statistics for 'dev' out to user space. */
static int isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
{
	struct ppp_stats __user *res = ifr->ifr_data;
	struct ppp_stats t;
	isdn_net_local *lp = netdev_priv(dev);

	if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
		return -EFAULT;

	/* build a temporary stat struct and copy it to user space */

	memset(&t, 0, sizeof(struct ppp_stats));
	if (dev->flags & IFF_UP) {
		t.p.ppp_ipackets = lp->stats.rx_packets;
		t.p.ppp_ibytes = lp->stats.rx_bytes;
		t.p.ppp_ierrors = lp->stats.rx_errors;
		t.p.ppp_opackets = lp->stats.tx_packets;
		t.p.ppp_obytes = lp->stats.tx_bytes;
		t.p.ppp_oerrors = lp->stats.tx_errors;
#ifdef CONFIG_ISDN_PPP_VJ
		if (slot >= 0 && ippp_table[slot]->slcomp) {
			/* mirror the VJ compressor's counters into the
			 * user-visible ppp_stats layout */
			struct slcompress *slcomp = ippp_table[slot]->slcomp;
			t.vj.vjs_packets = slcomp->sls_o_compressed + slcomp->sls_o_uncompressed;
			t.vj.vjs_compressed = slcomp->sls_o_compressed;
			t.vj.vjs_searches = slcomp->sls_o_searches;
			t.vj.vjs_misses = slcomp->sls_o_misses;
			t.vj.vjs_errorin = slcomp->sls_i_error;
			t.vj.vjs_tossed = slcomp->sls_i_tossed;
			t.vj.vjs_uncompressedin = slcomp->sls_i_uncompressed;
			t.vj.vjs_compressedin = slcomp->sls_i_compressed;
		}
#endif
	}
	if (copy_to_user(res, &t, sizeof(struct ppp_stats)))
		return -EFAULT;
	return 0;
}

/*
 * Handle SIOCGPPPVER / SIOCGPPPSTATS for a syncPPP-encapsulated device.
 * Returns 0 on success or a negative errno.
 */
int
isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int error=0;
	int len;
	isdn_net_local *lp = netdev_priv(dev);


	if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
		return -EINVAL;

	switch (cmd) {
#define PPP_VERSION "2.3.7"
		case SIOCGPPPVER:
			len = strlen(PPP_VERSION) + 1;
			if (copy_to_user(ifr->ifr_data, PPP_VERSION, len))
				error = -EFAULT;
			break;

		case SIOCGPPPSTATS:
			error = isdn_ppp_dev_ioctl_stats(lp->ppp_slot, ifr, dev);
			break;
		default:
			error = -EINVAL;
			break;
	}
	return error;
}

/*
 * Parse the unit number out of an interface name of the form
 * "ippp<digits>" (name length at most 8).  Returns the unit or -1 when
 * the name does not match.
 */
static int
isdn_ppp_if_get_unit(char *name)
{
	int len,
	 i,
	 unit = 0,
	 deci;

	len = strlen(name);

	if (strncmp("ippp", name, 4) || len > 8)
		return -1;

	/* accumulate trailing decimal digits, least significant first */
	for (i = 0, deci = 1; i < len; i++, deci *= 10) {
		char a = name[len - i - 1];
		if (a >= '0' && a <= '9')
			unit += (a - '0') * deci;
		else
			break;
	}
	/* exactly the "ippp" prefix must remain once digits are stripped */
	if (!i || len - i != 4)
		unit = -1;

	return unit;
}


/*
 * Dial the first not-yet-connected slave of interface 'name'.
 * Returns 0 on success, small positive codes on the visible failure
 * paths (1: no such interface, 2: no idle slave, 5: master not
 * connected), or -1 when MPP support is not compiled in.
 */
int
isdn_ppp_dial_slave(char *name)
{
#ifdef CONFIG_ISDN_MPP
	isdn_net_dev *ndev;
	isdn_net_local *lp;
	struct net_device *sdev;

	if (!(ndev = isdn_net_findif(name)))
		return 1;
	lp = ndev->local;
	if (!(lp->flags & ISDN_NET_CONNECTED))
		return 5;

	sdev = lp->slave;
	while (sdev) {
		isdn_net_local *mlp = netdev_priv(sdev);
		if (!(mlp->flags & ISDN_NET_CONNECTED))
			break;
		sdev = mlp->slave;
	}
	if (!sdev)
		return 2;

	isdn_net_dial_req(netdev_priv(sdev));
	return 0;
#else
	return -1;
#endif
}

/*
 * Hang up the last connected slave link of interface 'name'.
 * Return codes mirror isdn_ppp_dial_slave().
 */
int
isdn_ppp_hangup_slave(char *name)
{
#ifdef CONFIG_ISDN_MPP
	isdn_net_dev *ndev;
	isdn_net_local *lp;
	struct net_device *sdev;

	if (!(ndev = isdn_net_findif(name)))
		return 1;
	lp = ndev->local;
	if (!(lp->flags & ISDN_NET_CONNECTED))
		return 5;

	sdev = lp->slave;
	while (sdev) {
		isdn_net_local *mlp = netdev_priv(sdev);

		if (mlp->slave) { /* find last connected link in chain */
			isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);

			if (!(nlp->flags & ISDN_NET_CONNECTED))
				break;
		} else if (mlp->flags & ISDN_NET_CONNECTED)
			break;

		sdev = mlp->slave;
	}
	if (!sdev)
		return 2;

	isdn_net_hangup(sdev);
	return 0;
#else
	return -1;
#endif
}

/*
 * PPP compression stuff
 */


/* Push an empty CCP Data Frame up to the daemon to wake it up and let it
   generate a CCP Reset-Request or tear down CCP altogether */

static void isdn_ppp_ccp_kickup(struct ippp_struct *is)
{
	isdn_ppp_fill_rq(NULL, 0, PPP_COMP, is->lp->ppp_slot);
}

/* In-kernel handling of CCP Reset-Request and Reset-Ack is necessary,
   but absolutely nontrivial. The most abstruse problem we are facing is
   that the generation, reception and all the handling of timeouts and
   resends including proper request id management should be entirely left
   to the (de)compressor, but indeed is not covered by the current API to
   the (de)compressor. The API is a prototype version from PPP where only
   some (de)compressors have yet been implemented and all of them are
   rather simple in their reset handling. Especially, their is only one
   outstanding ResetAck at a time with all of them and ResetReq/-Acks do
   not have parameters. For this very special case it was sufficient to
   just return an error code from the decompressor and have a single
   reset() entry to communicate all the necessary information between
   the framework and the (de)compressor. Bad enough, LZS is different
   (and any other compressor may be different, too). It has multiple
   histories (eventually) and needs to Reset each of them independently
   and thus uses multiple outstanding Acks and history numbers as an
   additional parameter to Reqs/Acks.
   All that makes it harder to port the reset state engine into the
   kernel because it is not just the same simple one as in (i)pppd but
   it must be able to pass additional parameters and have multiple out-
   standing Acks. We are trying to achieve the impossible by handling
   reset transactions independent by their id. The id MUST change when
   the data portion changes, thus any (de)compressor who uses more than
   one resettable state must provide and recognize individual ids for
   each individual reset transaction. The framework itself does _only_
   differentiate them by id, because it has no other semantics like the
   (de)compressor might.
   This looks like a major redesign of the interface would be nice,
   but I don't have an idea how to do it better. */

/* Send a CCP Reset-Request or Reset-Ack directly from the kernel. This is
   getting that lengthy because there is no simple "send-this-frame-out"
   function above but every wrapper does a bit different. Hope I guess
   correct in this hack... */

static void isdn_ppp_ccp_xmit_reset(struct ippp_struct *is, int proto,
				    unsigned char code, unsigned char id,
				    unsigned char *data, int len)
{
	struct sk_buff *skb;
	unsigned char *p;
	int hl;
	int cnt = 0;
	isdn_net_local *lp = is->lp;

	/* Alloc large enough skb */
	hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
	skb = alloc_skb(len + hl + 16,GFP_ATOMIC);
	if(!skb) {
		printk(KERN_WARNING
		       "ippp: CCP cannot send reset - out of memory\n");
		return;
	}
	skb_reserve(skb, hl);

	/* We may need to stuff an address and control field first */
	if(!(is->pppcfg & SC_COMP_AC)) {
		p = skb_put(skb, 2);
		*p++ = 0xff;
		*p++ = 0x03;
	}

	/* Stuff proto, code, id and length */
	p = skb_put(skb, 6);
	*p++ = (proto >> 8);
	*p++ = (proto & 0xff);
	*p++ = code;
	*p++ = id;
	cnt = 4 + len;	/* CCP length covers code/id/length plus data */
	*p++ = (cnt >> 8);
	*p++ = (cnt & 0xff);

	/* Now stuff remaining bytes */
	if(len) {
		p = skb_put(skb, len);
		memcpy(p, data, len);
	}

	/* skb is now ready for xmit */
	printk(KERN_DEBUG "Sending CCP Frame:\n");
	isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit,lp->ppp_slot);

	isdn_net_write_super(lp, skb);
}

/* Allocate the reset state vector */
static struct ippp_ccp_reset *isdn_ppp_ccp_reset_alloc(struct ippp_struct *is)
{
	struct ippp_ccp_reset *r;
	r = kzalloc(sizeof(struct ippp_ccp_reset), GFP_KERNEL);
	if(!r) {
		printk(KERN_ERR "ippp_ccp: failed to allocate reset data"
		       " structure - no mem\n");
		return NULL;
	}
	printk(KERN_DEBUG "ippp_ccp: allocated reset data structure %p\n", r);
	is->reset = r;
	return r;
}

/* Destroy the reset state vector. Kill all pending timers first. */
static void isdn_ppp_ccp_reset_free(struct ippp_struct *is)
{
	unsigned int id;

	printk(KERN_DEBUG "ippp_ccp: freeing reset data structure %p\n",
	       is->reset);
	for(id = 0; id < 256; id++) {
		if(is->reset->rs[id]) {
			isdn_ppp_ccp_reset_free_state(is, (unsigned char)id);
		}
	}
	kfree(is->reset);
	is->reset = NULL;
}

/* Free a given state and clear everything up for later reallocation */
static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
					  unsigned char id)
{
	struct ippp_ccp_reset_state *rs;

	if(is->reset->rs[id]) {
		printk(KERN_DEBUG "ippp_ccp: freeing state for id %d\n", id);
		rs = is->reset->rs[id];
		/* Make sure the kernel will not call back later */
		if(rs->ta)
			del_timer(&rs->timer);
		is->reset->rs[id] = NULL;
		kfree(rs);
	} else {
		printk(KERN_WARNING "ippp_ccp: id %d is not allocated\n", id);
	}
}

/* The timer callback function which is called when a ResetReq has timed
   out, aka has never been answered by a ResetAck */
static void isdn_ppp_ccp_timer_callback(unsigned long closure)
{
	struct ippp_ccp_reset_state *rs =
		(struct ippp_ccp_reset_state *)closure;

	if(!rs) {
		printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
		return;
	}
	if(rs->ta && rs->state == CCPResetSentReq) {
		/* We are correct here */
		if(!rs->expra) {
			/* Hmm, there is no Ack really expected. We can clean
			   up the state now, it will be reallocated if the
			   decompressor insists on another reset */
			rs->ta = 0;
			isdn_ppp_ccp_reset_free_state(rs->is, rs->id);
			return;
		}
		printk(KERN_DEBUG "ippp_ccp: CCP Reset timed out for id %d\n",
		       rs->id);
		/* Push it again */
		isdn_ppp_ccp_xmit_reset(rs->is, PPP_CCP, CCP_RESETREQ, rs->id,
					rs->data, rs->dlen);
		/* Restart timer */
		rs->timer.expires = jiffies + HZ*5;
		add_timer(&rs->timer);
	} else {
		printk(KERN_WARNING "ippp_ccp: timer cb in wrong state %d\n",
		       rs->state);
	}
}

/* Allocate a new reset transaction state */
static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
						      unsigned char id)
{
	struct ippp_ccp_reset_state *rs;
	if(is->reset->rs[id]) {
		printk(KERN_WARNING "ippp_ccp: old state exists for id %d\n",
		       id);
		return NULL;
	} else {
		rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
		if(!rs)
			return NULL;
		rs->state = CCPResetIdle;
		rs->is = is;
		rs->id = id;
		init_timer(&rs->timer);
		rs->timer.data = (unsigned long)rs;
		rs->timer.function = isdn_ppp_ccp_timer_callback;
		is->reset->rs[id] = rs;
	}
	return rs;
}


/* A decompressor wants a reset with a set of parameters - do what is
   necessary to fulfill it */
static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
				     struct isdn_ppp_resetparams *rp)
{
	struct ippp_ccp_reset_state *rs;

	if(rp->valid) {
		/* The decompressor defines parameters by itself */
		if(rp->rsend) {
			/* And he wants us to send a request */
			if(!(rp->idval)) {
				printk(KERN_ERR "ippp_ccp: decompressor must"
				       " specify reset id\n");
				return;
			}
			if(is->reset->rs[rp->id]) {
				/* There is already a transaction in existence
				   for this id. May be still waiting for a
				   Ack or may be wrong.
				 */
				rs = is->reset->rs[rp->id];
				if(rs->state == CCPResetSentReq && rs->ta) {
					printk(KERN_DEBUG "ippp_ccp: reset"
					       " trans still in progress"
					       " for id %d\n", rp->id);
				} else {
					printk(KERN_WARNING "ippp_ccp: reset"
					       " trans in wrong state %d for"
					       " id %d\n", rs->state, rp->id);
				}
			} else {
				/* Ok, this is a new transaction */
				printk(KERN_DEBUG "ippp_ccp: new trans for id"
				       " %d to be started\n", rp->id);
				rs = isdn_ppp_ccp_reset_alloc_state(is, rp->id);
				if(!rs) {
					printk(KERN_ERR "ippp_ccp: out of mem"
					       " allocing ccp trans\n");
					return;
				}
				rs->state = CCPResetSentReq;
				rs->expra = rp->expra;
				if(rp->dtval) {
					rs->dlen = rp->dlen;
					memcpy(rs->data, rp->data, rp->dlen);
				}
				/* HACK TODO - add link comp here */
				isdn_ppp_ccp_xmit_reset(is, PPP_CCP,
							CCP_RESETREQ, rs->id,
							rs->data, rs->dlen);
				/* Start the timer */
				rs->timer.expires = jiffies + 5*HZ;
				add_timer(&rs->timer);
				rs->ta = 1;
			}
		} else {
			printk(KERN_DEBUG "ippp_ccp: no reset sent\n");
		}
	} else {
		/* The reset params are invalid. The decompressor does not
		   care about them, so we just send the minimal requests
		   and increase ids only when an Ack is received for a
		   given id */
		if(is->reset->rs[is->reset->lastid]) {
			/* There is already a transaction in existence
			   for this id. May be still waiting for a
			   Ack or may be wrong.
			 */
			rs = is->reset->rs[is->reset->lastid];
			/* NOTE(review): this branch is keyed on lastid but the
			   diagnostics below print rp->id — verify intent */
			if(rs->state == CCPResetSentReq && rs->ta) {
				printk(KERN_DEBUG "ippp_ccp: reset"
				       " trans still in progress"
				       " for id %d\n", rp->id);
			} else {
				printk(KERN_WARNING "ippp_ccp: reset"
				       " trans in wrong state %d for"
				       " id %d\n", rs->state, rp->id);
			}
		} else {
			printk(KERN_DEBUG "ippp_ccp: new trans for id"
			       " %d to be started\n", is->reset->lastid);
			rs = isdn_ppp_ccp_reset_alloc_state(is,
							    is->reset->lastid);
			if(!rs) {
				printk(KERN_ERR "ippp_ccp: out of mem"
				       " allocing ccp trans\n");
				return;
			}
			rs->state = CCPResetSentReq;
			/* We always expect an Ack if the decompressor doesn't
			   know better */
			rs->expra = 1;
			rs->dlen = 0;
			/* HACK TODO - add link comp here */
			isdn_ppp_ccp_xmit_reset(is, PPP_CCP, CCP_RESETREQ,
						rs->id, NULL, 0);
			/* Start the timer */
			rs->timer.expires = jiffies + 5*HZ;
			add_timer(&rs->timer);
			rs->ta = 1;
		}
	}
}

/* An Ack was received for this id. This means we stop the timer and clean
   up the state prior to calling the decompressors reset routine. */
static void isdn_ppp_ccp_reset_ack_rcvd(struct ippp_struct *is,
					unsigned char id)
{
	struct ippp_ccp_reset_state *rs = is->reset->rs[id];

	if(rs) {
		if(rs->ta && rs->state == CCPResetSentReq) {
			/* Great, we are correct */
			if(!rs->expra)
				printk(KERN_DEBUG "ippp_ccp: ResetAck received"
				       " for id %d but not expected\n", id);
		} else {
			printk(KERN_INFO "ippp_ccp: ResetAck received out of"
			       "sync for id %d\n", id);
		}
		if(rs->ta) {
			rs->ta = 0;
			del_timer(&rs->timer);
		}
		isdn_ppp_ccp_reset_free_state(is, id);
	} else {
		printk(KERN_INFO "ippp_ccp: ResetAck received for unknown id"
		       " %d\n", id);
	}
	/* Make sure the simple reset stuff uses a new id next time */
	is->reset->lastid++;
}

/*
 * decompress packet
 *
 * if master = 0, we're trying to uncompress an per-link compressed packet,
 * as opposed to an compressed reconstructed-from-MPPP packet.
 * proto is updated to protocol field of uncompressed packet.
 *
 * retval: decompressed packet,
 *         same packet if uncompressed,
 *	   NULL if decompression error
 */

static struct sk_buff *isdn_ppp_decompress(struct sk_buff *skb,struct ippp_struct *is,struct ippp_struct *master,
	int *proto)
{
	void *stat = NULL;
	struct isdn_ppp_compressor *ipc = NULL;
	struct sk_buff *skb_out;
	int len;
	struct ippp_struct *ri;
	struct isdn_ppp_resetparams rsparm;
	unsigned char rsdata[IPPP_RESET_MAXDATABYTES];

	if(!master) {
		// per-link decompression
		stat = is->link_decomp_stat;
		ipc = is->link_decompressor;
		ri = is;
	} else {
		stat = master->decomp_stat;
		ipc = master->decompressor;
		ri = master;
	}
	if (!ipc) {
		// no decompressor -> we can't decompress.
		printk(KERN_DEBUG "ippp: no decompressor defined!\n");
		return skb;
	}
	BUG_ON(!stat); // if we have a compressor, stat has been set as well

	if((master && *proto == PPP_COMP) || (!master && *proto == PPP_COMPFRAG) ) {
		// compressed packets are compressed by their protocol type

		// Set up reset params for the decompressor
		memset(&rsparm, 0, sizeof(rsparm));
		rsparm.data = rsdata;
		rsparm.maxdlen = IPPP_RESET_MAXDATABYTES;

		skb_out = dev_alloc_skb(is->mru + PPP_HDRLEN);
		if (!skb_out) {
			kfree_skb(skb);
			printk(KERN_ERR "ippp: decomp memory allocation failure\n");
			return NULL;
		}
		len = ipc->decompress(stat, skb, skb_out, &rsparm);
		kfree_skb(skb);	// input skb is consumed in either outcome
		if (len <= 0) {
			switch(len) {
			case DECOMP_ERROR:
				printk(KERN_INFO "ippp: decomp wants reset %s params\n",
				       rsparm.valid ? "with" : "without");

				isdn_ppp_ccp_reset_trans(ri, &rsparm);
				break;
			case DECOMP_FATALERROR:
				ri->pppcfg |= SC_DC_FERROR;
				/* Kick ipppd to recognize the error */
				isdn_ppp_ccp_kickup(ri);
				break;
			}
			kfree_skb(skb_out);
			return NULL;
		}
		*proto = isdn_ppp_strip_proto(skb_out);
		if (*proto < 0) {
			kfree_skb(skb_out);
			return NULL;
		}
		return skb_out;
	} else {
		// uncompressed packets are fed through the decompressor to
		// update the decompressor state
		ipc->incomp(stat, skb, *proto);
		return skb;
	}
}

/*
 * compress a frame
 *   type=0: normal/bundle compression
 *       =1: link compression
 * returns original skb if we haven't compressed the frame
 * and a new skb pointer if we've done it
 */
static struct sk_buff *isdn_ppp_compress(struct sk_buff *skb_in,int *proto,
	struct ippp_struct *is,struct ippp_struct *master,int type)
{
	int ret;
	int new_proto;
	struct isdn_ppp_compressor *compressor;
	void *stat;
	struct sk_buff *skb_out;

	/* we do not compress control protocols */
	if(*proto < 0 || *proto > 0x3fff) {
		return skb_in;
	}

	if(type) { /* type=1 => Link compression */
		/* link compression is not implemented: pass through */
		return skb_in;
	}
	else {
		if(!master) {
			compressor = is->compressor;
			stat = is->comp_stat;
		}
		else {
			compressor = master->compressor;
			stat = master->comp_stat;
		}
		new_proto = PPP_COMP;
	}

	if(!compressor) {
		printk(KERN_ERR "isdn_ppp: No compressor set!\n");
		return skb_in;
	}
	if(!stat) {
		printk(KERN_ERR "isdn_ppp: Compressor not initialized?\n");
		return skb_in;
	}

	/* Allow for at least 150 % expansion (for now) */
	skb_out = alloc_skb(skb_in->len + skb_in->len/2 + 32 +
		skb_headroom(skb_in), GFP_ATOMIC);
	if(!skb_out)
		return skb_in;
	skb_reserve(skb_out, skb_headroom(skb_in));

	ret = (compressor->compress)(stat,skb_in,skb_out,*proto);
	if(!ret) {
		/* compressor declined: keep the original frame */
		dev_kfree_skb(skb_out);
		return skb_in;
	}

	dev_kfree_skb(skb_in);
	*proto = new_proto;
	return skb_out;
}

/*
 * we received a CCP frame ..
* not a clean solution, but we MUST handle a few cases in the kernel */ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb,int proto) { struct ippp_struct *is; struct ippp_struct *mis; int len; struct isdn_ppp_resetparams rsparm; unsigned char rsdata[IPPP_RESET_MAXDATABYTES]; printk(KERN_DEBUG "Received CCP frame from peer slot(%d)\n", lp->ppp_slot); if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, lp->ppp_slot); return; } is = ippp_table[lp->ppp_slot]; isdn_ppp_frame_log("ccp-rcv", skb->data, skb->len, 32, is->unit,lp->ppp_slot); if(lp->master) { int slot = ISDN_MASTER_PRIV(lp)->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: slot(%d) out of range\n", __func__, slot); return; } mis = ippp_table[slot]; } else mis = is; switch(skb->data[0]) { case CCP_CONFREQ: if(is->debug & 0x10) printk(KERN_DEBUG "Disable compression here!\n"); if(proto == PPP_CCP) mis->compflags &= ~SC_COMP_ON; else is->compflags &= ~SC_LINK_COMP_ON; break; case CCP_TERMREQ: case CCP_TERMACK: if(is->debug & 0x10) printk(KERN_DEBUG "Disable (de)compression here!\n"); if(proto == PPP_CCP) mis->compflags &= ~(SC_DECOMP_ON|SC_COMP_ON); else is->compflags &= ~(SC_LINK_DECOMP_ON|SC_LINK_COMP_ON); break; case CCP_CONFACK: /* if we RECEIVE an ackowledge we enable the decompressor */ if(is->debug & 0x10) printk(KERN_DEBUG "Enable decompression here!\n"); if(proto == PPP_CCP) { if (!mis->decompressor) break; mis->compflags |= SC_DECOMP_ON; } else { if (!is->decompressor) break; is->compflags |= SC_LINK_DECOMP_ON; } break; case CCP_RESETACK: printk(KERN_DEBUG "Received ResetAck from peer\n"); len = (skb->data[2] << 8) | skb->data[3]; len -= 4; if(proto == PPP_CCP) { /* If a reset Ack was outstanding for this id, then clean up the state engine */ isdn_ppp_ccp_reset_ack_rcvd(mis, skb->data[1]); if(mis->decompressor && mis->decomp_stat) mis->decompressor-> 
reset(mis->decomp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, NULL); /* TODO: This is not easy to decide here */ mis->compflags &= ~SC_DECOMP_DISCARD; } else { isdn_ppp_ccp_reset_ack_rcvd(is, skb->data[1]); if(is->link_decompressor && is->link_decomp_stat) is->link_decompressor-> reset(is->link_decomp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, NULL); /* TODO: neither here */ is->compflags &= ~SC_LINK_DECOMP_DISCARD; } break; case CCP_RESETREQ: printk(KERN_DEBUG "Received ResetReq from peer\n"); /* Receiving a ResetReq means we must reset our compressor */ /* Set up reset params for the reset entry */ memset(&rsparm, 0, sizeof(rsparm)); rsparm.data = rsdata; rsparm.maxdlen = IPPP_RESET_MAXDATABYTES; /* Isolate data length */ len = (skb->data[2] << 8) | skb->data[3]; len -= 4; if(proto == PPP_CCP) { if(mis->compressor && mis->comp_stat) mis->compressor-> reset(mis->comp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, &rsparm); } else { if(is->link_compressor && is->link_comp_stat) is->link_compressor-> reset(is->link_comp_stat, skb->data[0], skb->data[1], len ? &skb->data[4] : NULL, len, &rsparm); } /* Ack the Req as specified by rsparm */ if(rsparm.valid) { /* Compressor reset handler decided how to answer */ if(rsparm.rsend) { /* We should send a Frame */ isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK, rsparm.idval ? rsparm.id : skb->data[1], rsparm.dtval ? rsparm.data : NULL, rsparm.dtval ? rsparm.dlen : 0); } else { printk(KERN_DEBUG "ResetAck suppressed\n"); } } else { /* We answer with a straight reflected Ack */ isdn_ppp_ccp_xmit_reset(is, proto, CCP_RESETACK, skb->data[1], len ? &skb->data[4] : NULL, len); } break; } } /* * Daemon sends a CCP frame ... */ /* TODO: Clean this up with new Reset semantics */ /* I believe the CCP handling as-is is done wrong. Compressed frames * should only be sent/received after CCP reaches UP state, which means * both sides have sent CONF_ACK. 
Currently, we handle both directions * independently, which means we may accept compressed frames too early * (supposedly not a problem), but may also mean we send compressed frames * too early, which may turn out to be a problem. * This part of state machine should actually be handled by (i)pppd, but * that's too big of a change now. --kai */ /* Actually, we might turn this into an advantage: deal with the RFC in * the old tradition of beeing generous on what we accept, but beeing * strict on what we send. Thus we should just * - accept compressed frames as soon as decompression is negotiated * - send compressed frames only when decomp *and* comp are negotiated * - drop rx compressed frames if we cannot decomp (instead of pushing them * up to ipppd) * and I tried to modify this file according to that. --abp */ static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *skb) { struct ippp_struct *mis,*is; int proto, slot = lp->ppp_slot; unsigned char *data; if(!skb || skb->len < 3) return; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", __func__, slot); return; } is = ippp_table[slot]; /* Daemon may send with or without address and control field comp */ data = skb->data; if(!(is->pppcfg & SC_COMP_AC) && data[0] == 0xff && data[1] == 0x03) { data += 2; if(skb->len < 5) return; } proto = ((int)data[0]<<8)+data[1]; if(proto != PPP_CCP && proto != PPP_CCPFRAG) return; printk(KERN_DEBUG "Received CCP frame from daemon:\n"); isdn_ppp_frame_log("ccp-xmit", skb->data, skb->len, 32, is->unit,lp->ppp_slot); if (lp->master) { slot = ISDN_MASTER_PRIV(lp)->ppp_slot; if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { printk(KERN_ERR "%s: slot(%d) out of range\n", __func__, slot); return; } mis = ippp_table[slot]; } else mis = is; if (mis != is) printk(KERN_DEBUG "isdn_ppp: Ouch! 
Master CCP sends on slave slot!\n"); switch(data[2]) { case CCP_CONFREQ: if(is->debug & 0x10) printk(KERN_DEBUG "Disable decompression here!\n"); if(proto == PPP_CCP) is->compflags &= ~SC_DECOMP_ON; else is->compflags &= ~SC_LINK_DECOMP_ON; break; case CCP_TERMREQ: case CCP_TERMACK: if(is->debug & 0x10) printk(KERN_DEBUG "Disable (de)compression here!\n"); if(proto == PPP_CCP) is->compflags &= ~(SC_DECOMP_ON|SC_COMP_ON); else is->compflags &= ~(SC_LINK_DECOMP_ON|SC_LINK_COMP_ON); break; case CCP_CONFACK: /* if we SEND an ackowledge we can/must enable the compressor */ if(is->debug & 0x10) printk(KERN_DEBUG "Enable compression here!\n"); if(proto == PPP_CCP) { if (!is->compressor) break; is->compflags |= SC_COMP_ON; } else { if (!is->compressor) break; is->compflags |= SC_LINK_COMP_ON; } break; case CCP_RESETACK: /* If we send a ACK we should reset our compressor */ if(is->debug & 0x10) printk(KERN_DEBUG "Reset decompression state here!\n"); printk(KERN_DEBUG "ResetAck from daemon passed by\n"); if(proto == PPP_CCP) { /* link to master? 
*/ if(is->compressor && is->comp_stat) is->compressor->reset(is->comp_stat, 0, 0, NULL, 0, NULL); is->compflags &= ~SC_COMP_DISCARD; } else { if(is->link_compressor && is->link_comp_stat) is->link_compressor->reset(is->link_comp_stat, 0, 0, NULL, 0, NULL); is->compflags &= ~SC_LINK_COMP_DISCARD; } break; case CCP_RESETREQ: /* Just let it pass by */ printk(KERN_DEBUG "ResetReq from daemon passed by\n"); break; } } int isdn_ppp_register_compressor(struct isdn_ppp_compressor *ipc) { ipc->next = ipc_head; ipc->prev = NULL; if(ipc_head) { ipc_head->prev = ipc; } ipc_head = ipc; return 0; } int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *ipc) { if(ipc->prev) ipc->prev->next = ipc->next; else ipc_head = ipc->next; if(ipc->next) ipc->next->prev = ipc->prev; ipc->prev = ipc->next = NULL; return 0; } static int isdn_ppp_set_compressor(struct ippp_struct *is, struct isdn_ppp_comp_data *data) { struct isdn_ppp_compressor *ipc = ipc_head; int ret; void *stat; int num = data->num; if(is->debug & 0x10) printk(KERN_DEBUG "[%d] Set %s type %d\n",is->unit, (data->flags&IPPP_COMP_FLAG_XMIT)?"compressor":"decompressor",num); /* If is has no valid reset state vector, we cannot allocate a decompressor. The decompressor would cause reset transactions sooner or later, and they need that vector. 
*/ if(!(data->flags & IPPP_COMP_FLAG_XMIT) && !is->reset) { printk(KERN_ERR "ippp_ccp: no reset data structure - can't" " allow decompression.\n"); return -ENOMEM; } while(ipc) { if(ipc->num == num) { stat = ipc->alloc(data); if(stat) { ret = ipc->init(stat,data,is->unit,0); if(!ret) { printk(KERN_ERR "Can't init (de)compression!\n"); ipc->free(stat); stat = NULL; break; } } else { printk(KERN_ERR "Can't alloc (de)compression!\n"); break; } if(data->flags & IPPP_COMP_FLAG_XMIT) { if(data->flags & IPPP_COMP_FLAG_LINK) { if(is->link_comp_stat) is->link_compressor->free(is->link_comp_stat); is->link_comp_stat = stat; is->link_compressor = ipc; } else { if(is->comp_stat) is->compressor->free(is->comp_stat); is->comp_stat = stat; is->compressor = ipc; } } else { if(data->flags & IPPP_COMP_FLAG_LINK) { if(is->link_decomp_stat) is->link_decompressor->free(is->link_decomp_stat); is->link_decomp_stat = stat; is->link_decompressor = ipc; } else { if(is->decomp_stat) is->decompressor->free(is->decomp_stat); is->decomp_stat = stat; is->decompressor = ipc; } } return 0; } ipc = ipc->next; } return -EINVAL; }
gpl-2.0
Xanwar/android_kernel_asus_a400cg
arch/s390/mm/extmem.c
3429
18869
/* * Author(s)......: Carsten Otte <cotte@de.ibm.com> * Rob M van der Heij <rvdheij@nl.ibm.com> * Steven Shultz <shultzss@us.ibm.com> * Bugreports.to..: <Linux390@de.ibm.com> * Copyright IBM Corp. 2002, 2004 */ #define KMSG_COMPONENT "extmem" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/kernel.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bootmem.h> #include <linux/ctype.h> #include <linux/ioport.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/ebcdic.h> #include <asm/errno.h> #include <asm/extmem.h> #include <asm/cpcmd.h> #include <asm/setup.h> #define DCSS_LOADSHR 0x00 #define DCSS_LOADNSR 0x04 #define DCSS_PURGESEG 0x08 #define DCSS_FINDSEG 0x0c #define DCSS_LOADNOLY 0x10 #define DCSS_SEGEXT 0x18 #define DCSS_LOADSHRX 0x20 #define DCSS_LOADNSRX 0x24 #define DCSS_FINDSEGX 0x2c #define DCSS_SEGEXTX 0x38 #define DCSS_FINDSEGA 0x0c struct qrange { unsigned long start; /* last byte type */ unsigned long end; /* last byte reserved */ }; struct qout64 { unsigned long segstart; unsigned long segend; int segcnt; int segrcnt; struct qrange range[6]; }; #ifdef CONFIG_64BIT struct qrange_old { unsigned int start; /* last byte type */ unsigned int end; /* last byte reserved */ }; /* output area format for the Diag x'64' old subcode x'18' */ struct qout64_old { int segstart; int segend; int segcnt; int segrcnt; struct qrange_old range[6]; }; #endif struct qin64 { char qopcode; char rsrv1[3]; char qrcode; char rsrv2[3]; char qname[8]; unsigned int qoutptr; short int qoutlen; }; struct dcss_segment { struct list_head list; char dcss_name[8]; char res_name[15]; unsigned long start_addr; unsigned long end; atomic_t ref_count; int do_nonshared; unsigned int vm_segtype; struct qrange range[6]; int segcnt; struct resource *res; }; static DEFINE_MUTEX(dcss_lock); static LIST_HEAD(dcss_list); static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", 
"EN", "SC", "EW/EN-MIXED" }; static int loadshr_scode, loadnsr_scode, findseg_scode; static int segext_scode, purgeseg_scode; static int scode_set; /* set correct Diag x'64' subcodes. */ static int dcss_set_subcodes(void) { #ifdef CONFIG_64BIT char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA); unsigned long rx, ry; int rc; if (name == NULL) return -ENOMEM; rx = (unsigned long) name; ry = DCSS_FINDSEGX; strcpy(name, "dummy"); asm volatile( " diag %0,%1,0x64\n" "0: ipm %2\n" " srl %2,28\n" " j 2f\n" "1: la %2,3\n" "2:\n" EX_TABLE(0b, 1b) : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); kfree(name); /* Diag x'64' new subcodes are supported, set to new subcodes */ if (rc != 3) { loadshr_scode = DCSS_LOADSHRX; loadnsr_scode = DCSS_LOADNSRX; purgeseg_scode = DCSS_PURGESEG; findseg_scode = DCSS_FINDSEGX; segext_scode = DCSS_SEGEXTX; return 0; } #endif /* Diag x'64' new subcodes are not supported, set to old subcodes */ loadshr_scode = DCSS_LOADNOLY; loadnsr_scode = DCSS_LOADNSR; purgeseg_scode = DCSS_PURGESEG; findseg_scode = DCSS_FINDSEG; segext_scode = DCSS_SEGEXT; return 0; } /* * Create the 8 bytes, ebcdic VM segment name from * an ascii name. */ static void dcss_mkname(char *name, char *dcss_name) { int i; for (i = 0; i < 8; i++) { if (name[i] == '\0') break; dcss_name[i] = toupper(name[i]); }; for (; i < 8; i++) dcss_name[i] = ' '; ASCEBC(dcss_name, 8); } /* * search all segments in dcss_list, and return the one * namend *name. If not found, return NULL. */ static struct dcss_segment * segment_by_name (char *name) { char dcss_name[9]; struct list_head *l; struct dcss_segment *tmp, *retval = NULL; BUG_ON(!mutex_is_locked(&dcss_lock)); dcss_mkname (name, dcss_name); list_for_each (l, &dcss_list) { tmp = list_entry (l, struct dcss_segment, list); if (memcmp(tmp->dcss_name, dcss_name, 8) == 0) { retval = tmp; break; } } return retval; } /* * Perform a function on a dcss segment. 
*/ static inline int dcss_diag(int *func, void *parameter, unsigned long *ret1, unsigned long *ret2) { unsigned long rx, ry; int rc; if (scode_set == 0) { rc = dcss_set_subcodes(); if (rc < 0) return rc; scode_set = 1; } rx = (unsigned long) parameter; ry = (unsigned long) *func; #ifdef CONFIG_64BIT /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ if (*func > DCSS_SEGEXT) asm volatile( " diag %0,%1,0x64\n" " ipm %2\n" " srl %2,28\n" : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); /* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */ else asm volatile( " sam31\n" " diag %0,%1,0x64\n" " sam64\n" " ipm %2\n" " srl %2,28\n" : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); #else asm volatile( " diag %0,%1,0x64\n" " ipm %2\n" " srl %2,28\n" : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); #endif *ret1 = rx; *ret2 = ry; return rc; } static inline int dcss_diag_translate_rc (int vm_rc) { if (vm_rc == 44) return -ENOENT; return -EIO; } /* do a diag to get info about a segment. * fills start_address, end and vm_segtype fields */ static int query_segment_type (struct dcss_segment *seg) { unsigned long dummy, vmrc; int diag_cc, rc, i; struct qout64 *qout; struct qin64 *qin; qin = kmalloc(sizeof(*qin), GFP_KERNEL | GFP_DMA); qout = kmalloc(sizeof(*qout), GFP_KERNEL | GFP_DMA); if ((qin == NULL) || (qout == NULL)) { rc = -ENOMEM; goto out_free; } /* initialize diag input parameters */ qin->qopcode = DCSS_FINDSEGA; qin->qoutptr = (unsigned long) qout; qin->qoutlen = sizeof(struct qout64); memcpy (qin->qname, seg->dcss_name, 8); diag_cc = dcss_diag(&segext_scode, qin, &dummy, &vmrc); if (diag_cc < 0) { rc = diag_cc; goto out_free; } if (diag_cc > 1) { pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc); rc = dcss_diag_translate_rc (vmrc); goto out_free; } #ifdef CONFIG_64BIT /* Only old format of output area of Diagnose x'64' is supported, copy data for the new format. 
*/ if (segext_scode == DCSS_SEGEXT) { struct qout64_old *qout_old; qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA); if (qout_old == NULL) { rc = -ENOMEM; goto out_free; } memcpy(qout_old, qout, sizeof(struct qout64_old)); qout->segstart = (unsigned long) qout_old->segstart; qout->segend = (unsigned long) qout_old->segend; qout->segcnt = qout_old->segcnt; qout->segrcnt = qout_old->segrcnt; if (qout->segcnt > 6) qout->segrcnt = 6; for (i = 0; i < qout->segrcnt; i++) { qout->range[i].start = (unsigned long) qout_old->range[i].start; qout->range[i].end = (unsigned long) qout_old->range[i].end; } kfree(qout_old); } #endif if (qout->segcnt > 6) { rc = -EOPNOTSUPP; goto out_free; } if (qout->segcnt == 1) { seg->vm_segtype = qout->range[0].start & 0xff; } else { /* multi-part segment. only one type supported here: - all parts are contiguous - all parts are either EW or EN type - maximum 6 parts allowed */ unsigned long start = qout->segstart >> PAGE_SHIFT; for (i=0; i<qout->segcnt; i++) { if (((qout->range[i].start & 0xff) != SEG_TYPE_EW) && ((qout->range[i].start & 0xff) != SEG_TYPE_EN)) { rc = -EOPNOTSUPP; goto out_free; } if (start != qout->range[i].start >> PAGE_SHIFT) { rc = -EOPNOTSUPP; goto out_free; } start = (qout->range[i].end >> PAGE_SHIFT) + 1; } seg->vm_segtype = SEG_TYPE_EWEN; } /* analyze diag output and update seg */ seg->start_addr = qout->segstart; seg->end = qout->segend; memcpy (seg->range, qout->range, 6*sizeof(struct qrange)); seg->segcnt = qout->segcnt; rc = 0; out_free: kfree(qin); kfree(qout); return rc; } /* * get info about a segment * possible return values: * -ENOSYS : we are not running on VM * -EIO : could not perform query diagnose * -ENOENT : no such segment * -EOPNOTSUPP: multi-part segment cannot be used with linux * -ENOMEM : out of memory * 0 .. 
6 : type of segment as defined in include/asm-s390/extmem.h */ int segment_type (char* name) { int rc; struct dcss_segment seg; if (!MACHINE_IS_VM) return -ENOSYS; dcss_mkname(name, seg.dcss_name); rc = query_segment_type (&seg); if (rc < 0) return rc; return seg.vm_segtype; } /* * check if segment collides with other segments that are currently loaded * returns 1 if this is the case, 0 if no collision was found */ static int segment_overlaps_others (struct dcss_segment *seg) { struct list_head *l; struct dcss_segment *tmp; BUG_ON(!mutex_is_locked(&dcss_lock)); list_for_each(l, &dcss_list) { tmp = list_entry(l, struct dcss_segment, list); if ((tmp->start_addr >> 20) > (seg->end >> 20)) continue; if ((tmp->end >> 20) < (seg->start_addr >> 20)) continue; if (seg == tmp) continue; return 1; } return 0; } /* * real segment loading function, called from segment_load */ static int __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end) { unsigned long start_addr, end_addr, dummy; struct dcss_segment *seg; int rc, diag_cc; start_addr = end_addr = 0; seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA); if (seg == NULL) { rc = -ENOMEM; goto out; } dcss_mkname (name, seg->dcss_name); rc = query_segment_type (seg); if (rc < 0) goto out_free; if (loadshr_scode == DCSS_LOADSHRX) { if (segment_overlaps_others(seg)) { rc = -EBUSY; goto out_free; } } rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1); if (rc) goto out_free; seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); if (seg->res == NULL) { rc = -ENOMEM; goto out_shared; } seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; seg->res->start = seg->start_addr; seg->res->end = seg->end; memcpy(&seg->res_name, seg->dcss_name, 8); EBCASC(seg->res_name, 8); seg->res_name[8] = '\0'; strncat(seg->res_name, " (DCSS)", 7); seg->res->name = seg->res_name; rc = seg->vm_segtype; if (rc == SEG_TYPE_SC || ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared)) seg->res->flags |= 
IORESOURCE_READONLY; if (request_resource(&iomem_resource, seg->res)) { rc = -EBUSY; kfree(seg->res); goto out_shared; } if (do_nonshared) diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name, &start_addr, &end_addr); else diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name, &start_addr, &end_addr); if (diag_cc < 0) { dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); rc = diag_cc; goto out_resource; } if (diag_cc > 1) { pr_warning("Loading DCSS %s failed with rc=%ld\n", name, end_addr); rc = dcss_diag_translate_rc(end_addr); dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); goto out_resource; } seg->start_addr = start_addr; seg->end = end_addr; seg->do_nonshared = do_nonshared; atomic_set(&seg->ref_count, 1); list_add(&seg->list, &dcss_list); *addr = seg->start_addr; *end = seg->end; if (do_nonshared) pr_info("DCSS %s of range %p to %p and type %s loaded as " "exclusive-writable\n", name, (void*) seg->start_addr, (void*) seg->end, segtype_string[seg->vm_segtype]); else { pr_info("DCSS %s of range %p to %p and type %s loaded in " "shared access mode\n", name, (void*) seg->start_addr, (void*) seg->end, segtype_string[seg->vm_segtype]); } goto out; out_resource: release_resource(seg->res); kfree(seg->res); out_shared: vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); out_free: kfree(seg); out: return rc; } /* * this function loads a DCSS segment * name : name of the DCSS * do_nonshared : 0 indicates that the dcss should be shared with other linux images * 1 indicates that the dcss should be exclusive for this linux image * addr : will be filled with start address of the segment * end : will be filled with end address of the segment * return values: * -ENOSYS : we are not running on VM * -EIO : could not perform query or load diagnose * -ENOENT : no such segment * -EOPNOTSUPP: multi-part segment cannot be used with linux * -ENOSPC : segment cannot be used (overlaps with storage) * -EBUSY : segment can temporarily not be used 
(overlaps with dcss) * -ERANGE : segment cannot be used (exceeds kernel mapping range) * -EPERM : segment is currently loaded with incompatible permissions * -ENOMEM : out of memory * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h */ int segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end) { struct dcss_segment *seg; int rc; if (!MACHINE_IS_VM) return -ENOSYS; mutex_lock(&dcss_lock); seg = segment_by_name (name); if (seg == NULL) rc = __segment_load (name, do_nonshared, addr, end); else { if (do_nonshared == seg->do_nonshared) { atomic_inc(&seg->ref_count); *addr = seg->start_addr; *end = seg->end; rc = seg->vm_segtype; } else { *addr = *end = 0; rc = -EPERM; } } mutex_unlock(&dcss_lock); return rc; } /* * this function modifies the shared state of a DCSS segment. note that * name : name of the DCSS * do_nonshared : 0 indicates that the dcss should be shared with other linux images * 1 indicates that the dcss should be exclusive for this linux image * return values: * -EIO : could not perform load diagnose (segment gone!) * -ENOENT : no such segment (segment gone!) 
* -EAGAIN : segment is in use by other exploiters, try later * -EINVAL : no segment with the given name is currently loaded - name invalid * -EBUSY : segment can temporarily not be used (overlaps with dcss) * 0 : operation succeeded */ int segment_modify_shared (char *name, int do_nonshared) { struct dcss_segment *seg; unsigned long start_addr, end_addr, dummy; int rc, diag_cc; start_addr = end_addr = 0; mutex_lock(&dcss_lock); seg = segment_by_name (name); if (seg == NULL) { rc = -EINVAL; goto out_unlock; } if (do_nonshared == seg->do_nonshared) { pr_info("DCSS %s is already in the requested access " "mode\n", name); rc = 0; goto out_unlock; } if (atomic_read (&seg->ref_count) != 1) { pr_warning("DCSS %s is in use and cannot be reloaded\n", name); rc = -EAGAIN; goto out_unlock; } release_resource(seg->res); if (do_nonshared) seg->res->flags &= ~IORESOURCE_READONLY; else if (seg->vm_segtype == SEG_TYPE_SR || seg->vm_segtype == SEG_TYPE_ER) seg->res->flags |= IORESOURCE_READONLY; if (request_resource(&iomem_resource, seg->res)) { pr_warning("DCSS %s overlaps with used memory resources " "and cannot be reloaded\n", name); rc = -EBUSY; kfree(seg->res); goto out_del_mem; } dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); if (do_nonshared) diag_cc = dcss_diag(&loadnsr_scode, seg->dcss_name, &start_addr, &end_addr); else diag_cc = dcss_diag(&loadshr_scode, seg->dcss_name, &start_addr, &end_addr); if (diag_cc < 0) { rc = diag_cc; goto out_del_res; } if (diag_cc > 1) { pr_warning("Reloading DCSS %s failed with rc=%ld\n", name, end_addr); rc = dcss_diag_translate_rc(end_addr); goto out_del_res; } seg->start_addr = start_addr; seg->end = end_addr; seg->do_nonshared = do_nonshared; rc = 0; goto out_unlock; out_del_res: release_resource(seg->res); kfree(seg->res); out_del_mem: vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); list_del(&seg->list); dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); kfree(seg); out_unlock: 
mutex_unlock(&dcss_lock); return rc; } /* * Decrease the use count of a DCSS segment and remove * it from the address space if nobody is using it * any longer. */ void segment_unload(char *name) { unsigned long dummy; struct dcss_segment *seg; if (!MACHINE_IS_VM) return; mutex_lock(&dcss_lock); seg = segment_by_name (name); if (seg == NULL) { pr_err("Unloading unknown DCSS %s failed\n", name); goto out_unlock; } if (atomic_dec_return(&seg->ref_count) != 0) goto out_unlock; release_resource(seg->res); kfree(seg->res); vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); list_del(&seg->list); dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); kfree(seg); out_unlock: mutex_unlock(&dcss_lock); } /* * save segment content permanently */ void segment_save(char *name) { struct dcss_segment *seg; char cmd1[160]; char cmd2[80]; int i, response; if (!MACHINE_IS_VM) return; mutex_lock(&dcss_lock); seg = segment_by_name (name); if (seg == NULL) { pr_err("Saving unknown DCSS %s failed\n", name); goto out; } sprintf(cmd1, "DEFSEG %s", name); for (i=0; i<seg->segcnt; i++) { sprintf(cmd1+strlen(cmd1), " %lX-%lX %s", seg->range[i].start >> PAGE_SHIFT, seg->range[i].end >> PAGE_SHIFT, segtype_string[seg->range[i].start & 0xff]); } sprintf(cmd2, "SAVESEG %s", name); response = 0; cpcmd(cmd1, NULL, 0, &response); if (response) { pr_err("Saving a DCSS failed with DEFSEG response code " "%i\n", response); goto out; } cpcmd(cmd2, NULL, 0, &response); if (response) { pr_err("Saving a DCSS failed with SAVESEG response code " "%i\n", response); goto out; } out: mutex_unlock(&dcss_lock); } /* * print appropriate error message for segment_load()/segment_type() * return code */ void segment_warning(int rc, char *seg_name) { switch (rc) { case -ENOENT: pr_err("DCSS %s cannot be loaded or queried\n", seg_name); break; case -ENOSYS: pr_err("DCSS %s cannot be loaded or queried without " "z/VM\n", seg_name); break; case -EIO: pr_err("Loading or querying DCSS %s resulted in 
a " "hardware error\n", seg_name); break; case -EOPNOTSUPP: pr_err("DCSS %s has multiple page ranges and cannot be " "loaded or queried\n", seg_name); break; case -ENOSPC: pr_err("DCSS %s overlaps with used storage and cannot " "be loaded\n", seg_name); break; case -EBUSY: pr_err("%s needs used memory resources and cannot be " "loaded or queried\n", seg_name); break; case -EPERM: pr_err("DCSS %s is already loaded in a different access " "mode\n", seg_name); break; case -ENOMEM: pr_err("There is not enough memory to load or query " "DCSS %s\n", seg_name); break; case -ERANGE: pr_err("DCSS %s exceeds the kernel mapping range (%lu) " "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS); break; default: break; } } EXPORT_SYMBOL(segment_load); EXPORT_SYMBOL(segment_unload); EXPORT_SYMBOL(segment_save); EXPORT_SYMBOL(segment_type); EXPORT_SYMBOL(segment_modify_shared); EXPORT_SYMBOL(segment_warning);
gpl-2.0
cameron581/kernel_msm
drivers/staging/iio/gyro/adxrs450_core.c
4965
10111
/* * ADXRS450/ADXRS453 Digital Output Gyroscope Driver * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/list.h> #include <linux/module.h> #include "../iio.h" #include "../sysfs.h" #include "adxrs450.h" /** * adxrs450_spi_read_reg_16() - read 2 bytes from a register pair * @dev: device associated with child of actual iio_dev * @reg_address: the address of the lower of the two registers,which should be an even address, * Second register's address is reg_address + 1. * @val: somewhere to pass back the value read **/ static int adxrs450_spi_read_reg_16(struct iio_dev *indio_dev, u8 reg_address, u16 *val) { struct adxrs450_state *st = iio_priv(indio_dev); int ret; mutex_lock(&st->buf_lock); st->tx[0] = ADXRS450_READ_DATA | (reg_address >> 7); st->tx[1] = reg_address << 1; st->tx[2] = 0; st->tx[3] = 0; if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1)) st->tx[3] |= ADXRS450_P; ret = spi_write(st->us, st->tx, 4); if (ret) { dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n", reg_address); goto error_ret; } ret = spi_read(st->us, st->rx, 4); if (ret) { dev_err(&st->us->dev, "problem while reading 16 bit register 0x%02x\n", reg_address); goto error_ret; } *val = (be32_to_cpu(*(u32 *)st->rx) >> 5) & 0xFFFF; error_ret: mutex_unlock(&st->buf_lock); return ret; } /** * adxrs450_spi_write_reg_16() - write 2 bytes data to a register pair * @dev: device associated with child of actual actual iio_dev * @reg_address: the address of the lower of the two registers,which should be an even address, * Second register's address is reg_address + 1. * @val: value to be written. 
**/ static int adxrs450_spi_write_reg_16(struct iio_dev *indio_dev, u8 reg_address, u16 val) { struct adxrs450_state *st = iio_priv(indio_dev); int ret; mutex_lock(&st->buf_lock); st->tx[0] = ADXRS450_WRITE_DATA | reg_address >> 7; st->tx[1] = reg_address << 1 | val >> 15; st->tx[2] = val >> 7; st->tx[3] = val << 1; if (!(hweight32(be32_to_cpu(*(u32 *)st->tx)) & 1)) st->tx[3] |= ADXRS450_P; ret = spi_write(st->us, st->tx, 4); if (ret) dev_err(&st->us->dev, "problem while writing 16 bit register 0x%02x\n", reg_address); msleep(1); /* enforce sequential transfer delay 0.1ms */ mutex_unlock(&st->buf_lock); return ret; } /** * adxrs450_spi_sensor_data() - read 2 bytes sensor data * @dev: device associated with child of actual iio_dev * @val: somewhere to pass back the value read **/ static int adxrs450_spi_sensor_data(struct iio_dev *indio_dev, s16 *val) { struct adxrs450_state *st = iio_priv(indio_dev); int ret; mutex_lock(&st->buf_lock); st->tx[0] = ADXRS450_SENSOR_DATA; st->tx[1] = 0; st->tx[2] = 0; st->tx[3] = 0; ret = spi_write(st->us, st->tx, 4); if (ret) { dev_err(&st->us->dev, "Problem while reading sensor data\n"); goto error_ret; } ret = spi_read(st->us, st->rx, 4); if (ret) { dev_err(&st->us->dev, "Problem while reading sensor data\n"); goto error_ret; } *val = (be32_to_cpu(*(u32 *)st->rx) >> 10) & 0xFFFF; error_ret: mutex_unlock(&st->buf_lock); return ret; } /** * adxrs450_spi_initial() - use for initializing procedure. 
* @st: device instance specific data * @val: somewhere to pass back the value read **/ static int adxrs450_spi_initial(struct adxrs450_state *st, u32 *val, char chk) { struct spi_message msg; int ret; struct spi_transfer xfers = { .tx_buf = st->tx, .rx_buf = st->rx, .bits_per_word = 8, .len = 4, }; mutex_lock(&st->buf_lock); st->tx[0] = ADXRS450_SENSOR_DATA; st->tx[1] = 0; st->tx[2] = 0; st->tx[3] = 0; if (chk) st->tx[3] |= (ADXRS450_CHK | ADXRS450_P); spi_message_init(&msg); spi_message_add_tail(&xfers, &msg); ret = spi_sync(st->us, &msg); if (ret) { dev_err(&st->us->dev, "Problem while reading initializing data\n"); goto error_ret; } *val = be32_to_cpu(*(u32 *)st->rx); error_ret: mutex_unlock(&st->buf_lock); return ret; } /* Recommended Startup Sequence by spec */ static int adxrs450_initial_setup(struct iio_dev *indio_dev) { u32 t; u16 data; int ret; struct adxrs450_state *st = iio_priv(indio_dev); msleep(ADXRS450_STARTUP_DELAY*2); ret = adxrs450_spi_initial(st, &t, 1); if (ret) return ret; if (t != 0x01) dev_warn(&st->us->dev, "The initial power on response " "is not correct! 
Restart without reset?\n"); msleep(ADXRS450_STARTUP_DELAY); ret = adxrs450_spi_initial(st, &t, 0); if (ret) return ret; msleep(ADXRS450_STARTUP_DELAY); ret = adxrs450_spi_initial(st, &t, 0); if (ret) return ret; if (((t & 0xff) | 0x01) != 0xff || ADXRS450_GET_ST(t) != 2) { dev_err(&st->us->dev, "The second response is not correct!\n"); return -EIO; } ret = adxrs450_spi_initial(st, &t, 0); if (ret) return ret; if (((t & 0xff) | 0x01) != 0xff || ADXRS450_GET_ST(t) != 2) { dev_err(&st->us->dev, "The third response is not correct!\n"); return -EIO; } ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_FAULT1, &data); if (ret) return ret; if (data & 0x0fff) { dev_err(&st->us->dev, "The device is not in normal status!\n"); return -EINVAL; } ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_PID1, &data); if (ret) return ret; dev_info(&st->us->dev, "The Part ID is 0x%x\n", data); ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNL, &data); if (ret) return ret; t = data; ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_SNH, &data); if (ret) return ret; t |= data << 16; dev_info(&st->us->dev, "The Serial Number is 0x%x\n", t); return 0; } static int adxrs450_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { int ret; switch (mask) { case IIO_CHAN_INFO_CALIBBIAS: ret = adxrs450_spi_write_reg_16(indio_dev, ADXRS450_DNC1, val & 0x3FF); break; default: ret = -EINVAL; break; } return ret; } static int adxrs450_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { int ret; s16 t; switch (mask) { case 0: switch (chan->type) { case IIO_ANGL_VEL: ret = adxrs450_spi_sensor_data(indio_dev, &t); if (ret) break; *val = t; ret = IIO_VAL_INT; break; case IIO_TEMP: ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_TEMP1, &t); if (ret) break; *val = (t >> 6) + 225; ret = IIO_VAL_INT; break; default: ret = -EINVAL; break; } break; case IIO_CHAN_INFO_SCALE: switch (chan->type) { case 
IIO_ANGL_VEL: *val = 0; *val2 = 218166; return IIO_VAL_INT_PLUS_NANO; case IIO_TEMP: *val = 200; *val2 = 0; return IIO_VAL_INT; default: return -EINVAL; } break; case IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW: ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t); if (ret) break; *val = t; ret = IIO_VAL_INT; break; case IIO_CHAN_INFO_CALIBBIAS: ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t); if (ret) break; *val = t; ret = IIO_VAL_INT; break; default: ret = -EINVAL; break; } return ret; } static const struct iio_chan_spec adxrs450_channels[2][2] = { [ID_ADXRS450] = { { .type = IIO_ANGL_VEL, .modified = 1, .channel2 = IIO_MOD_Z, .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SEPARATE_BIT, }, { .type = IIO_TEMP, .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, } }, [ID_ADXRS453] = { { .type = IIO_ANGL_VEL, .modified = 1, .channel2 = IIO_MOD_Z, .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT | IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT, }, { .type = IIO_TEMP, .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, } }, }; static const struct iio_info adxrs450_info = { .driver_module = THIS_MODULE, .read_raw = &adxrs450_read_raw, .write_raw = &adxrs450_write_raw, }; static int __devinit adxrs450_probe(struct spi_device *spi) { int ret; struct adxrs450_state *st; struct iio_dev *indio_dev; /* setup the industrialio driver allocated elements */ indio_dev = iio_allocate_device(sizeof(*st)); if (indio_dev == NULL) { ret = -ENOMEM; goto error_ret; } st = iio_priv(indio_dev); st->us = spi; mutex_init(&st->buf_lock); /* This is only used for removal purposes */ spi_set_drvdata(spi, indio_dev); indio_dev->dev.parent = &spi->dev; indio_dev->info = &adxrs450_info; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = adxrs450_channels[spi_get_device_id(spi)->driver_data]; indio_dev->num_channels = 
ARRAY_SIZE(adxrs450_channels); indio_dev->name = spi->dev.driver->name; ret = iio_device_register(indio_dev); if (ret) goto error_free_dev; /* Get the device into a sane initial state */ ret = adxrs450_initial_setup(indio_dev); if (ret) goto error_initial; return 0; error_initial: iio_device_unregister(indio_dev); error_free_dev: iio_free_device(indio_dev); error_ret: return ret; } static int adxrs450_remove(struct spi_device *spi) { iio_device_unregister(spi_get_drvdata(spi)); iio_free_device(spi_get_drvdata(spi)); return 0; } static const struct spi_device_id adxrs450_id[] = { {"adxrs450", ID_ADXRS450}, {"adxrs453", ID_ADXRS453}, {} }; MODULE_DEVICE_TABLE(spi, adxrs450_id); static struct spi_driver adxrs450_driver = { .driver = { .name = "adxrs450", .owner = THIS_MODULE, }, .probe = adxrs450_probe, .remove = __devexit_p(adxrs450_remove), .id_table = adxrs450_id, }; module_spi_driver(adxrs450_driver); MODULE_AUTHOR("Cliff Cai <cliff.cai@xxxxxxxxxx>"); MODULE_DESCRIPTION("Analog Devices ADXRS450/ADXRS453 Gyroscope SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
EPDCenter/android_kernel_allwinner_a31_unusual
drivers/staging/iio/gyro/adis16260_core.c
4965
17602
/*
 * ADIS16260/ADIS16265 Programmable Digital Gyroscope Sensor Driver
 *
 * Copyright 2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/module.h>

#include "../iio.h"
#include "../sysfs.h"
#include "../buffer.h"
#include "adis16260.h"

#define DRIVER_NAME		"adis16260"

static int adis16260_check_status(struct iio_dev *indio_dev);

/**
 * adis16260_spi_write_reg_8() - write single byte to a register
 * @indio_dev: iio_dev for the device
 * @reg_address: the address of the register to be written
 * @val: the value to write
 **/
static int adis16260_spi_write_reg_8(struct iio_dev *indio_dev,
		u8 reg_address,
		u8 val)
{
	int ret;
	struct adis16260_state *st = iio_priv(indio_dev);

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16260_WRITE_REG(reg_address);
	st->tx[1] = val;

	ret = spi_write(st->us, st->tx, 2);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/**
 * adis16260_spi_write_reg_16() - write 2 bytes to a pair of registers
 * @indio_dev: iio_dev for the device
 * @lower_reg_address: the address of the lower of the two registers. Second
 *                     register is assumed to have address one greater.
 * @value: value to be written
 **/
static int adis16260_spi_write_reg_16(struct iio_dev *indio_dev,
		u8 lower_reg_address,
		u16 value)
{
	int ret;
	struct spi_message msg;
	struct adis16260_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 20,
		}, {
			.tx_buf = st->tx + 2,
			.bits_per_word = 8,
			.len = 2,
			.delay_usecs = 20,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16260_WRITE_REG(lower_reg_address);
	st->tx[1] = value & 0xFF;
	st->tx[2] = ADIS16260_WRITE_REG(lower_reg_address + 1);
	st->tx[3] = (value >> 8) & 0xFF;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);

	return ret;
}

/**
 * adis16260_spi_read_reg_16() - read 2 bytes from a 16-bit register
 * @indio_dev: iio_dev for the device
 * @lower_reg_address: the address of the lower of the two registers. Second
 *                     register is assumed to have address one greater.
 * @val: somewhere to pass back the value read
 **/
static int adis16260_spi_read_reg_16(struct iio_dev *indio_dev,
		u8 lower_reg_address,
		u16 *val)
{
	struct spi_message msg;
	struct adis16260_state *st = iio_priv(indio_dev);
	int ret;
	struct spi_transfer xfers[] = {
		{
			.tx_buf = st->tx,
			.bits_per_word = 8,
			.len = 2,
			.cs_change = 1,
			.delay_usecs = 30,
		}, {
			.rx_buf = st->rx,
			.bits_per_word = 8,
			.len = 2,
			.delay_usecs = 30,
		},
	};

	mutex_lock(&st->buf_lock);
	st->tx[0] = ADIS16260_READ_REG(lower_reg_address);
	st->tx[1] = 0;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	ret = spi_sync(st->us, &msg);
	if (ret) {
		dev_err(&st->us->dev,
			"problem when reading 16 bit register 0x%02X",
			lower_reg_address);
		goto error_ret;
	}
	*val = (st->rx[0] << 8) | st->rx[1];

error_ret:
	mutex_unlock(&st->buf_lock);
	return ret;
}

static ssize_t adis16260_read_frequency_available(struct device *dev,
						  struct device_attribute *attr,
						  char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16260_state *st = iio_priv(indio_dev);

	/* driver_data != 0 identifies the adis16251 variant */
	if (spi_get_device_id(st->us)->driver_data)
		return sprintf(buf, "%s\n", "0.129 ~ 256");
	else
		return sprintf(buf, "%s\n", "256 2048");
}

static ssize_t adis16260_read_frequency(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16260_state *st = iio_priv(indio_dev);
	int ret, len = 0;
	u16 t;
	int sps;

	ret = adis16260_spi_read_reg_16(indio_dev, ADIS16260_SMPL_PRD, &t);
	if (ret)
		return ret;

	if (spi_get_device_id(st->us)->driver_data) /* If an adis16251 */
		sps =  (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 8 : 256;
	else
		sps =  (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 66 : 2048;
	sps /= (t & ADIS16260_SMPL_PRD_DIV_MASK) + 1;
	len = sprintf(buf, "%d SPS\n", sps);

	return len;
}

static ssize_t adis16260_write_frequency(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16260_state *st = iio_priv(indio_dev);
	long val;
	int ret;
	u8 t;

	ret = strict_strtol(buf, 10, &val);
	if (ret)
		return ret;
	/* reject 0 before it reaches the divisions below */
	if (val == 0)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);
	/*
	 * Test driver_data, not the id pointer itself: spi_get_device_id()
	 * never returns NULL here, so the old check always took the
	 * adis16251 branch.
	 */
	if (spi_get_device_id(st->us)->driver_data) {
		t = (256 / val);
		if (t > 0)
			t--;
		t &= ADIS16260_SMPL_PRD_DIV_MASK;
	} else {
		t = (2048 / val);
		if (t > 0)
			t--;
		t &= ADIS16260_SMPL_PRD_DIV_MASK;
	}
	/* slow sample rates need the slower SPI clock */
	if ((t & ADIS16260_SMPL_PRD_DIV_MASK) >= 0x0A)
		st->us->max_speed_hz = ADIS16260_SPI_SLOW;
	else
		st->us->max_speed_hz = ADIS16260_SPI_FAST;
	ret = adis16260_spi_write_reg_8(indio_dev,
			ADIS16260_SMPL_PRD,
			t);

	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int adis16260_reset(struct iio_dev *indio_dev)
{
	int ret;

	ret = adis16260_spi_write_reg_8(indio_dev,
			ADIS16260_GLOB_CMD,
			ADIS16260_GLOB_CMD_SW_RESET);
	if (ret)
		dev_err(&indio_dev->dev, "problem resetting device");

	return ret;
}

static ssize_t adis16260_write_reset(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	if (len < 1)
		return -EINVAL;
	switch (buf[0]) {
	case '1':
	case 'y':
	case 'Y':
		return adis16260_reset(indio_dev);
	}
	return -EINVAL;
}

int adis16260_set_irq(struct iio_dev *indio_dev, bool enable)
{
	int ret;
	u16 msc;

	ret = adis16260_spi_read_reg_16(indio_dev, ADIS16260_MSC_CTRL, &msc);
	if (ret)
		goto error_ret;

	msc |= ADIS16260_MSC_CTRL_DATA_RDY_POL_HIGH;
	if (enable)
		msc |= ADIS16260_MSC_CTRL_DATA_RDY_EN;
	else
		msc &= ~ADIS16260_MSC_CTRL_DATA_RDY_EN;

	ret = adis16260_spi_write_reg_16(indio_dev, ADIS16260_MSC_CTRL, msc);
	if (ret)
		goto error_ret;

error_ret:
	return ret;
}

/* Power down the device */
static int adis16260_stop_device(struct iio_dev *indio_dev)
{
	int ret;
	u16 val = ADIS16260_SLP_CNT_POWER_OFF;

	ret = adis16260_spi_write_reg_16(indio_dev, ADIS16260_SLP_CNT, val);
	if (ret)
		dev_err(&indio_dev->dev,
			"problem with turning device off: SLP_CNT");

	return ret;
}

static int adis16260_self_test(struct iio_dev *indio_dev)
{
	int ret;

	ret = adis16260_spi_write_reg_16(indio_dev,
			ADIS16260_MSC_CTRL,
			ADIS16260_MSC_CTRL_MEM_TEST);
	if (ret) {
		dev_err(&indio_dev->dev, "problem starting self test");
		goto err_ret;
	}

	adis16260_check_status(indio_dev);
err_ret:
	return ret;
}

static int adis16260_check_status(struct iio_dev *indio_dev)
{
	u16 status;
	int ret;
	struct device *dev = &indio_dev->dev;

	ret = adis16260_spi_read_reg_16(indio_dev,
					ADIS16260_DIAG_STAT,
					&status);
	if (ret < 0) {
		dev_err(dev, "Reading status failed\n");
		goto error_ret;
	}

	/* low 7 bits are the error flags; non-zero means a fault */
	ret = status & 0x7F;
	if (status & ADIS16260_DIAG_STAT_FLASH_CHK)
		dev_err(dev, "Flash checksum error\n");
	if (status & ADIS16260_DIAG_STAT_SELF_TEST)
		dev_err(dev, "Self test error\n");
	if (status & ADIS16260_DIAG_STAT_OVERFLOW)
		dev_err(dev, "Sensor overrange\n");
	if (status & ADIS16260_DIAG_STAT_SPI_FAIL)
		dev_err(dev, "SPI failure\n");
	if (status & ADIS16260_DIAG_STAT_FLASH_UPT)
		dev_err(dev, "Flash update failed\n");
	if (status & ADIS16260_DIAG_STAT_POWER_HIGH)
		dev_err(dev, "Power supply above 5.25V\n");
	if (status & ADIS16260_DIAG_STAT_POWER_LOW)
		dev_err(dev, "Power supply below 4.75V\n");

error_ret:
	return ret;
}

static int adis16260_initial_setup(struct iio_dev *indio_dev)
{
	int ret;
	struct device *dev = &indio_dev->dev;

	/* Disable IRQ */
	ret = adis16260_set_irq(indio_dev, false);
	if (ret) {
		dev_err(dev, "disable irq failed");
		goto err_ret;
	}

	/* Do self test */
	ret = adis16260_self_test(indio_dev);
	if (ret) {
		dev_err(dev, "self test failure");
		goto err_ret;
	}

	/* Read status register to check the result */
	ret = adis16260_check_status(indio_dev);
	if (ret) {
		adis16260_reset(indio_dev);
		dev_err(dev, "device not playing ball -> reset");
		msleep(ADIS16260_STARTUP_DELAY);
		ret = adis16260_check_status(indio_dev);
		if (ret) {
			dev_err(dev, "giving up");
			goto err_ret;
		}
	}
err_ret:
	return ret;
}

static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
		adis16260_read_frequency,
		adis16260_write_frequency);

static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16260_write_reset, 0);

static IIO_DEVICE_ATTR(sampling_frequency_available,
		       S_IRUGO, adis16260_read_frequency_available, NULL, 0);

enum adis16260_channel {
	gyro,
	temp,
	in_supply,
	in_aux,
	angle,
};

#define ADIS16260_GYRO_CHANNEL_SET(axis, mod)				\
struct iio_chan_spec adis16260_channels_##axis[] = {			\
	IIO_CHAN(IIO_ANGL_VEL, 1, 0, 0, NULL, 0, mod,			\
		 IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |			\
		 IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |		\
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT,			\
		 gyro, ADIS16260_SCAN_GYRO,				\
		 IIO_ST('s', 14, 16, 0), 0),				\
	IIO_CHAN(IIO_ANGL, 1, 0, 0, NULL, 0, mod,			\
		 0,							\
		 angle, ADIS16260_SCAN_ANGL,				\
		 IIO_ST('u', 14, 16, 0), 0),				\
	IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,				\
		 IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |			\
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT,			\
		 temp, ADIS16260_SCAN_TEMP,				\
		 IIO_ST('u', 12, 16, 0), 0),				\
	IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,			\
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT,			\
		 in_supply, ADIS16260_SCAN_SUPPLY,			\
		 IIO_ST('u', 12, 16, 0), 0),				\
	IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,			\
		 IIO_CHAN_INFO_SCALE_SEPARATE_BIT,			\
		 in_aux, ADIS16260_SCAN_AUX_ADC,			\
		 IIO_ST('u', 12, 16, 0), 0),				\
	IIO_CHAN_SOFT_TIMESTAMP(5)					\
}

static const ADIS16260_GYRO_CHANNEL_SET(x, IIO_MOD_X);
static const ADIS16260_GYRO_CHANNEL_SET(y, IIO_MOD_Y);
static const ADIS16260_GYRO_CHANNEL_SET(z, IIO_MOD_Z);

/* per-channel register addresses: [0] out, [1] offset, [2] scale */
static const u8 adis16260_addresses[5][3] = {
	[gyro] = { ADIS16260_GYRO_OUT,
		   ADIS16260_GYRO_OFF,
		   ADIS16260_GYRO_SCALE },
	[angle] = { ADIS16260_ANGL_OUT },
	[in_supply] = { ADIS16260_SUPPLY_OUT },
	[in_aux] = { ADIS16260_AUX_ADC },
	[temp] = { ADIS16260_TEMP_OUT },
};

static int adis16260_read_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int *val, int *val2,
			      long mask)
{
	struct adis16260_state *st = iio_priv(indio_dev);
	int ret;
	int bits;
	u8 addr;
	s16 val16;

	switch (mask) {
	case 0:
		mutex_lock(&indio_dev->mlock);
		addr = adis16260_addresses[chan->address][0];
		ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}

		if (val16 & ADIS16260_ERROR_ACTIVE) {
			ret = adis16260_check_status(indio_dev);
			if (ret) {
				mutex_unlock(&indio_dev->mlock);
				return ret;
			}
		}
		val16 = val16 & ((1 << chan->scan_type.realbits) - 1);
		if (chan->scan_type.sign == 's')
			val16 = (s16)(val16 <<
				      (16 - chan->scan_type.realbits)) >>
				(16 - chan->scan_type.realbits);
		*val = val16;
		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_ANGL_VEL:
			*val = 0;
			if (spi_get_device_id(st->us)->driver_data)
				*val2 = 320;
			else
				*val2 = 1278;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_VOLTAGE:
			*val = 0;
			if (chan->channel == 0)
				*val2 = 18315;
			else
				*val2 = 610500;
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_TEMP:
			*val = 0;
			*val2 = 145300;
			return IIO_VAL_INT_PLUS_MICRO;
		default:
			return -EINVAL;
		}
		break;
	case IIO_CHAN_INFO_OFFSET:
		*val = 25;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_CALIBBIAS:
		switch (chan->type) {
		case IIO_ANGL_VEL:
			bits = 12;
			break;
		default:
			return -EINVAL;
		}
		mutex_lock(&indio_dev->mlock);
		addr = adis16260_addresses[chan->address][1];
		ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}
		val16 &= (1 << bits) - 1;
		val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
		*val = val16;
		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_CALIBSCALE:
		switch (chan->type) {
		case IIO_ANGL_VEL:
			bits = 12;
			break;
		default:
			return -EINVAL;
		}
		mutex_lock(&indio_dev->mlock);
		addr = adis16260_addresses[chan->address][2];
		ret = adis16260_spi_read_reg_16(indio_dev, addr, &val16);
		if (ret) {
			mutex_unlock(&indio_dev->mlock);
			return ret;
		}
		/*
		 * Report the register contents; the old code threw the
		 * value away and always returned the constant mask
		 * (1 << bits) - 1.
		 */
		*val = val16 & ((1 << bits) - 1);
		mutex_unlock(&indio_dev->mlock);
		return IIO_VAL_INT;
	}
	return -EINVAL;
}

static int adis16260_write_raw(struct iio_dev *indio_dev,
			       struct iio_chan_spec const *chan,
			       int val,
			       int val2,
			       long mask)
{
	int bits = 12;
	s16 val16;
	u8 addr;

	switch (mask) {
	case IIO_CHAN_INFO_CALIBBIAS:
		val16 = val & ((1 << bits) - 1);
		addr = adis16260_addresses[chan->address][1];
		return adis16260_spi_write_reg_16(indio_dev, addr, val16);
	case IIO_CHAN_INFO_CALIBSCALE:
		val16 = val & ((1 << bits) - 1);
		addr = adis16260_addresses[chan->address][2];
		return adis16260_spi_write_reg_16(indio_dev, addr, val16);
	}
	return -EINVAL;
}

static struct attribute *adis16260_attributes[] = {
	&iio_dev_attr_sampling_frequency.dev_attr.attr,
	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
	&iio_dev_attr_reset.dev_attr.attr,
	NULL
};

static const struct attribute_group adis16260_attribute_group = {
	.attrs = adis16260_attributes,
};

static const struct iio_info adis16260_info = {
	.attrs = &adis16260_attribute_group,
	.read_raw = &adis16260_read_raw,
	.write_raw = &adis16260_write_raw,
	.driver_module = THIS_MODULE,
};

static int __devinit adis16260_probe(struct spi_device *spi)
{
	int ret;
	struct adis16260_platform_data *pd = spi->dev.platform_data;
	struct adis16260_state *st;
	struct iio_dev *indio_dev;

	/* setup the industrialio driver allocated elements */
	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	st = iio_priv(indio_dev);
	if (pd)
		st->negate = pd->negate;
	/*
	 * This is only used for removal purposes. Store the iio_dev, not
	 * the state: adis16260_remove() reads it back as an iio_dev and
	 * the old code handed it the state pointer instead.
	 */
	spi_set_drvdata(spi, indio_dev);

	st->us = spi;
	mutex_init(&st->buf_lock);

	indio_dev->name = spi_get_device_id(st->us)->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->info = &adis16260_info;
	indio_dev->num_channels = ARRAY_SIZE(adis16260_channels_x);
	if (pd && pd->direction)
		switch (pd->direction) {
		case 'x':
			indio_dev->channels = adis16260_channels_x;
			break;
		case 'y':
			indio_dev->channels = adis16260_channels_y;
			break;
		case 'z':
			indio_dev->channels = adis16260_channels_z;
			break;
		default:
			/* don't leak indio_dev on a bad platform direction */
			ret = -EINVAL;
			goto error_free_dev;
		}
	else
		indio_dev->channels = adis16260_channels_x;
	indio_dev->modes = INDIO_DIRECT_MODE;

	ret = adis16260_configure_ring(indio_dev);
	if (ret)
		goto error_free_dev;

	ret = iio_buffer_register(indio_dev,
				  indio_dev->channels,
				  ARRAY_SIZE(adis16260_channels_x));
	if (ret) {
		printk(KERN_ERR "failed to initialize the ring\n");
		goto error_unreg_ring_funcs;
	}

	if (indio_dev->buffer) {
		/* Set default scan mode */
		iio_scan_mask_set(indio_dev, indio_dev->buffer,
				  ADIS16260_SCAN_SUPPLY);
		iio_scan_mask_set(indio_dev, indio_dev->buffer,
				  ADIS16260_SCAN_GYRO);
		iio_scan_mask_set(indio_dev, indio_dev->buffer,
				  ADIS16260_SCAN_AUX_ADC);
		iio_scan_mask_set(indio_dev, indio_dev->buffer,
				  ADIS16260_SCAN_TEMP);
		iio_scan_mask_set(indio_dev, indio_dev->buffer,
				  ADIS16260_SCAN_ANGL);
	}

	if (spi->irq) {
		ret = adis16260_probe_trigger(indio_dev);
		if (ret)
			goto error_uninitialize_ring;
	}

	/* Get the device into a sane initial state */
	ret = adis16260_initial_setup(indio_dev);
	if (ret)
		goto error_remove_trigger;
	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_remove_trigger;
	return 0;

error_remove_trigger:
	adis16260_remove_trigger(indio_dev);
error_uninitialize_ring:
	iio_buffer_unregister(indio_dev);
error_unreg_ring_funcs:
	adis16260_unconfigure_ring(indio_dev);
error_free_dev:
	iio_free_device(indio_dev);
error_ret:
	return ret;
}

static int adis16260_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);

	iio_device_unregister(indio_dev);
	/*
	 * Continue tearing down even if the power-off write fails; the
	 * old code bailed out here and leaked the trigger, ring and
	 * device on error.
	 */
	adis16260_stop_device(indio_dev);
	flush_scheduled_work();
	adis16260_remove_trigger(indio_dev);
	iio_buffer_unregister(indio_dev);
	adis16260_unconfigure_ring(indio_dev);
	iio_free_device(indio_dev);

	return 0;
}

/*
 * These parts do not need to be differentiated until someone adds
 * support for the on chip filtering.
 */
static const struct spi_device_id adis16260_id[] = {
	{"adis16260", 0},
	{"adis16265", 0},
	{"adis16250", 0},
	{"adis16255", 0},
	{"adis16251", 1},
	{}
};
MODULE_DEVICE_TABLE(spi, adis16260_id);

static struct spi_driver adis16260_driver = {
	.driver = {
		.name = "adis16260",
		.owner = THIS_MODULE,
	},
	.probe = adis16260_probe,
	.remove = __devexit_p(adis16260_remove),
	.id_table = adis16260_id,
};
module_spi_driver(adis16260_driver);

MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor");
MODULE_LICENSE("GPL v2");
gpl-2.0
miuihu/android_kernel_xiaomi_armor
arch/x86/xen/platform-pci-unplug.c
5477
4478
/******************************************************************************
 * platform-pci-unplug.c
 *
 * Xen platform PCI device driver
 * Copyright (c) 2010, Citrix
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>

#include <xen/platform_pci.h>

#define XEN_PLATFORM_ERR_MAGIC -1
#define XEN_PLATFORM_ERR_PROTOCOL -2
#define XEN_PLATFORM_ERR_BLACKLIST -3

/* store the value of xen_emul_unplug after the unplug is done */
int xen_platform_pci_unplug;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
#ifdef CONFIG_XEN_PVHVM
static int xen_emul_unplug;

/*
 * Handshake with the platform device over its magic I/O ports; returns 0
 * when the host speaks a supported unplug protocol, or one of the
 * XEN_PLATFORM_ERR_* codes otherwise.
 */
static int check_platform_magic(void)
{
	short magic;
	char protocol;

	magic = inw(XEN_IOPORT_MAGIC);
	if (magic != XEN_IOPORT_MAGIC_VAL) {
		printk(KERN_ERR "Xen Platform PCI: unrecognised magic value\n");
		return XEN_PLATFORM_ERR_MAGIC;
	}

	protocol = inb(XEN_IOPORT_PROTOVER);

	printk(KERN_DEBUG "Xen Platform PCI: I/O protocol version %d\n",
			protocol);

	switch (protocol) {
	case 1:
		outw(XEN_IOPORT_LINUX_PRODNUM, XEN_IOPORT_PRODNUM);
		outl(XEN_IOPORT_LINUX_DRVVER, XEN_IOPORT_DRVVER);
		if (inw(XEN_IOPORT_MAGIC) != XEN_IOPORT_MAGIC_VAL) {
			printk(KERN_ERR "Xen Platform: blacklisted by host\n");
			return XEN_PLATFORM_ERR_BLACKLIST;
		}
		break;
	default:
		printk(KERN_WARNING "Xen Platform PCI: unknown I/O protocol version");
		return XEN_PLATFORM_ERR_PROTOCOL;
	}

	return 0;
}

void xen_unplug_emulated_devices(void)
{
	int r;

	/* user explicitly requested no unplug */
	if (xen_emul_unplug & XEN_UNPLUG_NEVER)
		return;
	/* check the version of the xen platform PCI device */
	r = check_platform_magic();
	/* If the version matches enable the Xen platform PCI driver.
	 * Also enable the Xen platform PCI driver if the host does
	 * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC)
	 * but the user told us that unplugging is unnecessary. */
	if (r && !(r == XEN_PLATFORM_ERR_MAGIC &&
			(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)))
		return;
	/* Set the default value of xen_emul_unplug depending on whether or
	 * not the Xen PV frontends and the Xen platform PCI driver have
	 * been compiled for this kernel (modules or built-in are both OK). */
	if (!xen_emul_unplug) {
		if (xen_must_unplug_nics()) {
			printk(KERN_INFO "Netfront and the Xen platform PCI driver have "
					"been compiled for this kernel: unplug emulated NICs.\n");
			xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
		}
		if (xen_must_unplug_disks()) {
			printk(KERN_INFO "Blkfront and the Xen platform PCI driver have "
					"been compiled for this kernel: unplug emulated disks.\n"
					"You might have to change the root device\n"
					"from /dev/hd[a-d] to /dev/xvd[a-d]\n"
					"in your root= kernel command line option\n");
			xen_emul_unplug |= XEN_UNPLUG_ALL_IDE_DISKS;
		}
	}
	/* Now unplug the emulated devices */
	if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))
		outw(xen_emul_unplug, XEN_IOPORT_UNPLUG);
	xen_platform_pci_unplug = xen_emul_unplug;
}

/*
 * Parse the comma-separated "xen_emul_unplug=" option list into the
 * xen_emul_unplug bitmask.
 */
static int __init parse_xen_emul_unplug(char *arg)
{
	char *p, *q;
	int l;

	for (p = arg; p; p = q) {
		q = strchr(p, ',');
		if (q) {
			l = q - p;
			q++;
		} else {
			l = strlen(p);
		}
		if (!strncmp(p, "all", l))
			xen_emul_unplug |= XEN_UNPLUG_ALL;
		else if (!strncmp(p, "ide-disks", l))
			xen_emul_unplug |= XEN_UNPLUG_ALL_IDE_DISKS;
		else if (!strncmp(p, "aux-ide-disks", l))
			xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS;
		else if (!strncmp(p, "nics", l))
			xen_emul_unplug |= XEN_UNPLUG_ALL_NICS;
		else if (!strncmp(p, "unnecessary", l))
			xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY;
		else if (!strncmp(p, "never", l))
			xen_emul_unplug |= XEN_UNPLUG_NEVER;
		else
			/*
			 * Print only the l-character token; the old "%s"
			 * dumped the whole remaining comma-separated
			 * string, not just the bad option.
			 */
			printk(KERN_WARNING "unrecognised option '%.*s' "
				 "in parameter 'xen_emul_unplug'\n", l, p);
	}
	return 0;
}
early_param("xen_emul_unplug", parse_xen_emul_unplug);
#endif
gpl-2.0
Split-Screen/android_kernel_xiaomi_cancro
arch/arm/mm/fsr-3level.c
6501
3487
/*
 * Fault status (FSR/ESR) decode table for the ARM long-descriptor (3-level,
 * LPAE) translation format: one entry per 6-bit fault status code, giving
 * the handler, the signal and si_code delivered on failure, and a
 * human-readable name. Indexed directly by the status value (0..63).
 */
static struct fsr_info fsr_info[] = {
	{ do_bad,		SIGBUS,  0,		"unknown 0"			},
	{ do_bad,		SIGBUS,  0,		"unknown 1"			},
	{ do_bad,		SIGBUS,  0,		"unknown 2"			},
	{ do_bad,		SIGBUS,  0,		"unknown 3"			},
	{ do_bad,		SIGBUS,  0,		"reserved translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
	{ do_bad,		SIGBUS,  0,		"reserved access flag fault"	},
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
	{ do_bad,		SIGBUS,  0,		"reserved permission fault"	},
	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
	{ do_bad,		SIGBUS,  0,		"asynchronous external abort"	},
	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
	{ do_bad,		SIGBUS,  0,		"asynchronous parity error"	},
	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
	{ do_bad,		SIGBUS,  0,		"unknown 27"			},
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
	{ do_bad,		SIGBUS,  0,		"synchronous parity error (translation table walk" },
	{ do_bad,		SIGBUS,  0,		"unknown 32"			},
	{ do_bad,		SIGBUS,  BUS_ADRALN,	"alignment fault"		},
	{ do_bad,		SIGBUS,  0,		"debug event"			},
	{ do_bad,		SIGBUS,  0,		"unknown 35"			},
	{ do_bad,		SIGBUS,  0,		"unknown 36"			},
	{ do_bad,		SIGBUS,  0,		"unknown 37"			},
	{ do_bad,		SIGBUS,  0,		"unknown 38"			},
	{ do_bad,		SIGBUS,  0,		"unknown 39"			},
	{ do_bad,		SIGBUS,  0,		"unknown 40"			},
	{ do_bad,		SIGBUS,  0,		"unknown 41"			},
	{ do_bad,		SIGBUS,  0,		"unknown 42"			},
	{ do_bad,		SIGBUS,  0,		"unknown 43"			},
	{ do_bad,		SIGBUS,  0,		"unknown 44"			},
	{ do_bad,		SIGBUS,  0,		"unknown 45"			},
	{ do_bad,		SIGBUS,  0,		"unknown 46"			},
	{ do_bad,		SIGBUS,  0,		"unknown 47"			},
	{ do_bad,		SIGBUS,  0,		"unknown 48"			},
	{ do_bad,		SIGBUS,  0,		"unknown 49"			},
	{ do_bad,		SIGBUS,  0,		"unknown 50"			},
	{ do_bad,		SIGBUS,  0,		"unknown 51"			},
	{ do_bad,		SIGBUS,  0,		"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  0,		"unknown 53"			},
	{ do_bad,		SIGBUS,  0,		"unknown 54"			},
	{ do_bad,		SIGBUS,  0,		"unknown 55"			},
	{ do_bad,		SIGBUS,  0,		"unknown 56"			},
	{ do_bad,		SIGBUS,  0,		"unknown 57"			},
	{ do_bad,		SIGBUS,  0,		"implementation fault (coprocessor abort)" },
	{ do_bad,		SIGBUS,  0,		"unknown 59"			},
	{ do_bad,		SIGBUS,  0,		"unknown 60"			},
	{ do_bad,		SIGBUS,  0,		"unknown 61"			},
	{ do_bad,		SIGBUS,  0,		"unknown 62"			},
	{ do_bad,		SIGBUS,  0,		"unknown 63"			},
};

/* with LPAE, instruction faults use the same status encoding as data faults */
#define ifsr_info	fsr_info
gpl-2.0
xdatravelbug/N909D_Kernel_JB_4.1.2
fs/adfs/dir_f.c
12389
10602
/*
 *  linux/fs/adfs/dir_f.c
 *
 *  Copyright (C) 1997-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  E and F format directory handling
 */
#include <linux/buffer_head.h>
#include "adfs.h"
#include "dir_f.h"

static void adfs_f_free(struct adfs_dir *dir);

/*
 * Read an (unaligned) value of length 1..4 bytes
 * (little-endian; the switch cases deliberately fall through)
 */
static inline unsigned int adfs_readval(unsigned char *p, int len)
{
	unsigned int val = 0;

	switch (len) {
	case 4:		val |= p[3] << 24;	/* fallthrough */
	case 3:		val |= p[2] << 16;	/* fallthrough */
	case 2:		val |= p[1] << 8;	/* fallthrough */
	default:	val |= p[0];
	}
	return val;
}

/* Write an (unaligned) little-endian value of length 1..4 bytes */
static inline void adfs_writeval(unsigned char *p, int len, unsigned int val)
{
	switch (len) {
	case 4:		p[3] = val >> 24;	/* fallthrough */
	case 3:		p[2] = val >> 16;	/* fallthrough */
	case 2:		p[1] = val >> 8;	/* fallthrough */
	default:	p[0] = val;
	}
}

/*
 * Copy a RISC OS name (terminated by any control character) into buf,
 * mapping '/' to '.'; returns the number of characters copied.
 */
static inline int adfs_readname(char *buf, char *ptr, int maxlen)
{
	char *old_buf = buf;

	while ((unsigned char)*ptr >= ' ' && maxlen--) {
		if (*ptr == '/')
			*buf++ = '.';
		else
			*buf++ = *ptr;
		ptr++;
	}

	return buf - old_buf;
}

/* rotate right by 13 bits (32-bit), as used by the RISC OS check byte */
#define ror13(v) ((v >> 13) | (v << 19))

/* byte at directory offset idx, which may live in any of the buffer heads */
#define dir_u8(idx)				\
	({ int _buf = idx >> blocksize_bits;	\
	   int _off = idx - (_buf << blocksize_bits);\
	*(u8 *)(bh[_buf]->b_data + _off);	\
	})

/* 32-bit little-endian word at directory offset idx */
#define dir_u32(idx)				\
	({ int _buf = idx >> blocksize_bits;	\
	   int _off = idx - (_buf << blocksize_bits);\
	*(__le32 *)(bh[_buf]->b_data + _off);	\
	})

/* pointer into the right buffer head for directory offset _idx */
#define bufoff(_bh,_idx)			\
	({ int _buf = _idx >> blocksize_bits;	\
	   int _off = _idx - (_buf << blocksize_bits);\
	(u8 *)(_bh[_buf]->b_data + _off);	\
	})

/*
 * There are some algorithms that are nice in
 * assembler, but a bitch in C...  This is one
 * of them.
 */
static u8
adfs_dir_checkbyte(const struct adfs_dir *dir)
{
	struct buffer_head * const *bh = dir->bh;
	const int blocksize_bits = dir->sb->s_blocksize_bits;
	union { __le32 *ptr32; u8 *ptr8; } ptr, end;
	u32 dircheck = 0;
	int last = 5 - 26;	/* first entry starts at offset 5; entries are 26 bytes */
	int i = 0;

	/*
	 * Accumulate each word up to the last whole
	 * word of the last directory entry.  This
	 * can spread across several buffer heads.
	 */
	do {
		last += 26;
		do {
			dircheck = le32_to_cpu(dir_u32(i)) ^ ror13(dircheck);

			i += sizeof(u32);
		} while (i < (last & ~3));
	} while (dir_u8(last) != 0);

	/*
	 * Accumulate the last few bytes.  These
	 * bytes will be within the same bh.
	 */
	if (i != last) {
		ptr.ptr8 = bufoff(bh, i);
		end.ptr8 = ptr.ptr8 + last - i;

		do {
			dircheck = *ptr.ptr8++ ^ ror13(dircheck);
		} while (ptr.ptr8 < end.ptr8);
	}

	/*
	 * The directory tail is in the final bh
	 * Note that contary to the RISC OS PRMs,
	 * the first few bytes are NOT included
	 * in the check.  All bytes are in the
	 * same bh.
	 */
	ptr.ptr8 = bufoff(bh, 2008);
	end.ptr8 = ptr.ptr8 + 36;

	do {
		__le32 v = *ptr.ptr32++;
		dircheck = le32_to_cpu(v) ^ ror13(dircheck);
	} while (ptr.ptr32 < end.ptr32);

	/* fold the 32-bit accumulator down to the single check byte */
	return (dircheck ^ (dircheck >> 8) ^ (dircheck >> 16) ^ (dircheck >> 24)) & 0xff;
}

/*
 * Read and check that a directory is valid
 */
static int
adfs_dir_read(struct super_block *sb, unsigned long object_id,
	      unsigned int size, struct adfs_dir *dir)
{
	const unsigned int blocksize_bits = sb->s_blocksize_bits;
	int blk = 0;

	/*
	 * Directories which are not a multiple of 2048 bytes
	 * are considered bad v2 [3.6]
	 */
	if (size & 2047)
		goto bad_dir;

	size >>= blocksize_bits;

	dir->nr_buffers = 0;
	dir->sb = sb;

	for (blk = 0; blk < size; blk++) {
		int phys;

		phys = __adfs_block_map(sb, object_id, blk);
		if (!phys) {
			adfs_error(sb, "dir object %lX has a hole at offset %d",
				   object_id, blk);
			goto release_buffers;
		}

		dir->bh[blk] = sb_bread(sb, phys);
		if (!dir->bh[blk])
			goto release_buffers;
	}

	/* head lives at offset 0, tail at fixed offset 2007 */
	memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead));
	memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail));

	/* head/tail sequence numbers and start/end names must agree */
	if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq ||
	    memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4))
		goto bad_dir;

	if (memcmp(&dir->dirhead.startname, "Nick", 4) &&
	    memcmp(&dir->dirhead.startname, "Hugo", 4))
		goto bad_dir;

	if (adfs_dir_checkbyte(dir) != dir->dirtail.new.dircheckbyte)
		goto bad_dir;

	dir->nr_buffers = blk;

	return 0;

bad_dir:
	adfs_error(sb, "corrupted directory fragment %lX",
		   object_id);
release_buffers:
	for (blk -= 1; blk >= 0; blk -= 1)
		brelse(dir->bh[blk]);

	dir->sb = NULL;

	return -EIO;
}

/*
 * convert a disk-based directory entry to a Linux ADFS directory entry
 */
static inline void
adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj,
	struct adfs_direntry *de)
{
	obj->name_len =	adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN);
	obj->file_id  = adfs_readval(de->dirinddiscadd, 3);
	obj->loadaddr = adfs_readval(de->dirload, 4);
	obj->execaddr = adfs_readval(de->direxec, 4);
	obj->size     = adfs_readval(de->dirlen,  4);
	obj->attr     = de->newdiratts;
	obj->filetype = -1;

	/*
	 * object is a file and is filetyped and timestamped?
	 * RISC OS 12-bit filetype is stored in load_address[19:8]
	 */
	if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
		(0xfff00000 == (0xfff00000 & obj->loadaddr))) {
		obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);

		/* optionally append the ,xyz hex filetype suffix */
		if (ADFS_SB(dir->sb)->s_ftsuffix)
			obj->name_len +=
				append_filetype_suffix(
					&obj->name[obj->name_len],
					obj->filetype);
	}
}

/*
 * convert a Linux ADFS directory entry to a disk-based directory entry
 */
static inline void
adfs_obj2dir(struct adfs_direntry *de, struct object_info *obj)
{
	adfs_writeval(de->dirinddiscadd, 3, obj->file_id);
	adfs_writeval(de->dirload, 4, obj->loadaddr);
	adfs_writeval(de->direxec, 4, obj->execaddr);
	adfs_writeval(de->dirlen,  4, obj->size);
	de->newdiratts = obj->attr;
}

/*
 * get a directory entry.  Note that the caller is responsible
 * for holding the relevant locks.
 */
static int
__adfs_dir_get(struct adfs_dir *dir, int pos, struct object_info *obj)
{
	struct super_block *sb = dir->sb;
	struct adfs_direntry de;
	int thissize, buffer, offset;

	buffer = pos >> sb->s_blocksize_bits;

	if (buffer > dir->nr_buffers)
		return -EINVAL;

	offset = pos & (sb->s_blocksize - 1);
	thissize = sb->s_blocksize - offset;
	if (thissize > 26)
		thissize = 26;

	/* an entry may straddle two buffer heads; copy in two pieces */
	memcpy(&de, dir->bh[buffer]->b_data + offset, thissize);
	if (thissize != 26)
		memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data,
		       26 - thissize);

	if (!de.dirobname[0])
		return -ENOENT;

	adfs_dir2obj(dir, obj, &de);

	return 0;
}

static int
__adfs_dir_put(struct adfs_dir *dir, int pos, struct object_info *obj)
{
	struct super_block *sb = dir->sb;
	struct adfs_direntry de;
	int thissize, buffer, offset;

	buffer = pos >> sb->s_blocksize_bits;

	if (buffer > dir->nr_buffers)
		return -EINVAL;

	offset = pos & (sb->s_blocksize - 1);
	thissize = sb->s_blocksize - offset;
	if (thissize > 26)
		thissize = 26;

	/*
	 * Get the entry in total
	 */
	memcpy(&de, dir->bh[buffer]->b_data + offset, thissize);
	if (thissize != 26)
		memcpy(((char *)&de) + thissize, dir->bh[buffer + 1]->b_data,
		       26 - thissize);

	/*
	 * update it
	 */
	adfs_obj2dir(&de, obj);

	/*
	 * Put the new entry back
	 */
	memcpy(dir->bh[buffer]->b_data + offset, &de, thissize);
	if (thissize != 26)
		memcpy(dir->bh[buffer + 1]->b_data, ((char *)&de) + thissize,
		       26 - thissize);

	return 0;
}

/*
 * the caller is responsible for holding the necessary
 * locks.
*/ static int adfs_dir_find_entry(struct adfs_dir *dir, unsigned long object_id) { int pos, ret; ret = -ENOENT; for (pos = 5; pos < ADFS_NUM_DIR_ENTRIES * 26 + 5; pos += 26) { struct object_info obj; if (!__adfs_dir_get(dir, pos, &obj)) break; if (obj.file_id == object_id) { ret = pos; break; } } return ret; } static int adfs_f_read(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir) { int ret; if (sz != ADFS_NEWDIR_SIZE) return -EIO; ret = adfs_dir_read(sb, id, sz, dir); if (ret) adfs_error(sb, "unable to read directory"); else dir->parent_id = adfs_readval(dir->dirtail.new.dirparent, 3); return ret; } static int adfs_f_setpos(struct adfs_dir *dir, unsigned int fpos) { if (fpos >= ADFS_NUM_DIR_ENTRIES) return -ENOENT; dir->pos = 5 + fpos * 26; return 0; } static int adfs_f_getnext(struct adfs_dir *dir, struct object_info *obj) { unsigned int ret; ret = __adfs_dir_get(dir, dir->pos, obj); if (ret == 0) dir->pos += 26; return ret; } static int adfs_f_update(struct adfs_dir *dir, struct object_info *obj) { struct super_block *sb = dir->sb; int ret, i; ret = adfs_dir_find_entry(dir, obj->file_id); if (ret < 0) { adfs_error(dir->sb, "unable to locate entry to update"); goto out; } __adfs_dir_put(dir, ret, obj); /* * Increment directory sequence number */ dir->bh[0]->b_data[0] += 1; dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 6] += 1; ret = adfs_dir_checkbyte(dir); /* * Update directory check byte */ dir->bh[dir->nr_buffers - 1]->b_data[sb->s_blocksize - 1] = ret; #if 1 { const unsigned int blocksize_bits = sb->s_blocksize_bits; memcpy(&dir->dirhead, bufoff(dir->bh, 0), sizeof(dir->dirhead)); memcpy(&dir->dirtail, bufoff(dir->bh, 2007), sizeof(dir->dirtail)); if (dir->dirhead.startmasseq != dir->dirtail.new.endmasseq || memcmp(&dir->dirhead.startname, &dir->dirtail.new.endname, 4)) goto bad_dir; if (memcmp(&dir->dirhead.startname, "Nick", 4) && memcmp(&dir->dirhead.startname, "Hugo", 4)) goto bad_dir; if (adfs_dir_checkbyte(dir) != 
dir->dirtail.new.dircheckbyte) goto bad_dir; } #endif for (i = dir->nr_buffers - 1; i >= 0; i--) mark_buffer_dirty(dir->bh[i]); ret = 0; out: return ret; #if 1 bad_dir: adfs_error(dir->sb, "whoops! I broke a directory!"); return -EIO; #endif } static int adfs_f_sync(struct adfs_dir *dir) { int err = 0; int i; for (i = dir->nr_buffers - 1; i >= 0; i--) { struct buffer_head *bh = dir->bh[i]; sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) err = -EIO; } return err; } static void adfs_f_free(struct adfs_dir *dir) { int i; for (i = dir->nr_buffers - 1; i >= 0; i--) { brelse(dir->bh[i]); dir->bh[i] = NULL; } dir->nr_buffers = 0; dir->sb = NULL; } struct adfs_dir_ops adfs_f_dir_ops = { .read = adfs_f_read, .setpos = adfs_f_setpos, .getnext = adfs_f_getnext, .update = adfs_f_update, .sync = adfs_f_sync, .free = adfs_f_free };
gpl-2.0
ench0/android_kernel_samsung_hlted
Documentation/prctl/disable-tsc-on-off-stress-test.c
12901
1717
/*
 * Tests for prctl(PR_GET_TSC, ...) / prctl(PR_SET_TSC, ...)
 *
 * Tests if the control register is updated correctly
 * when set with prctl()
 *
 * Warning: this test will cause a very high load for a few seconds
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <wait.h>


#include <sys/prctl.h>
#include <linux/prctl.h>

/* Get/set the process' ability to use the timestamp counter instruction */
#ifndef PR_GET_TSC
#define PR_GET_TSC 25
#define PR_SET_TSC 26
# define PR_TSC_ENABLE		1   /* allow the use of the timestamp counter */
# define PR_TSC_SIGSEGV		2   /* throw a SIGSEGV instead of reading the TSC */
#endif

/* snippet from wikipedia :-) */
/* Read the CPU timestamp counter. */
static uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	/* We cannot use "=A", since this would use %rax on x86_64 */
	__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
	return (uint64_t)hi << 32 | lo;
}

/*
 * Set when the next rdtsc() is expected to fault.  Written from the
 * SIGSEGV handler and read in the main loop, so it must be a
 * volatile sig_atomic_t (plain int is not async-signal-safe).
 */
static volatile sig_atomic_t should_segv;

/*
 * SIGSEGV handler: a fault is only expected while should_segv is set.
 * Re-enables the TSC so the faulting rdtsc() can be retried.
 */
static void sigsegv_cb(int sig)
{
	(void)sig;

	if (!should_segv) {
		fprintf(stderr, "FATAL ERROR, rdtsc() failed while enabled\n");
		/* was exit(0): a fatal error must not report success */
		exit(EXIT_FAILURE);
	}
	if (prctl(PR_SET_TSC, PR_TSC_ENABLE) < 0) {
		perror("prctl");
		exit(EXIT_FAILURE);
	}
	should_segv = 0;

	rdtsc();
}

/*
 * Worker: repeatedly disable the TSC and read it, expecting a SIGSEGV
 * each time.  Terminated by the alarm() after 10 seconds.
 */
static void task(void)
{
	signal(SIGSEGV, sigsegv_cb);
	alarm(10);
	for (;;) {
		rdtsc();
		if (should_segv) {
			fprintf(stderr, "FATAL ERROR, rdtsc() succeeded while disabled\n");
			exit(EXIT_FAILURE);
		}
		if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV) < 0) {
			perror("prctl");
			exit(EXIT_FAILURE);
		}
		should_segv = 1;
	}
}


int main(int argc, char **argv)
{
	int n_tasks = 100, i;

	(void)argc;
	(void)argv;

	fprintf(stderr, "[No further output means we're allright]\n");

	for (i = 0; i < n_tasks; i++) {
		pid_t pid = fork();

		if (pid < 0) {		/* fork() failure was silently ignored */
			perror("fork");
			exit(EXIT_FAILURE);
		}
		if (pid == 0)
			task();
	}

	for (i = 0; i < n_tasks; i++)
		wait(NULL);

	exit(0);
}
gpl-2.0
TeamRegular/android_kernel_samsung_exynos3470
drivers/misc/sgi-xp/xp_uv.c
14181
3985
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition (XP) uv-based functions.
 *
 *      Architecture specific implementation of common functions.
 *
 */

#include <linux/device.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/grukservices.h"
#include "xp.h"

/*
 * Convert a virtual memory address to a physical memory address.
 */
static unsigned long
xp_pa_uv(void *addr)
{
	return uv_gpa(addr);
}

/*
 * Convert a global physical to socket physical address.
 */
static unsigned long
xp_socket_pa_uv(unsigned long gpa)
{
	return uv_gpa_to_soc_phys_ram(gpa);
}

/*
 * Read a single 8-byte MMR-space value via gru_read_gpa().
 * src_gpa must be in MMR space and len must be exactly 8.
 */
static enum xp_retval
xp_remote_mmr_read(unsigned long dst_gpa, const unsigned long src_gpa,
		   size_t len)
{
	int ret;
	unsigned long *dst_va = __va(uv_gpa_to_soc_phys_ram(dst_gpa));

	BUG_ON(!uv_gpa_in_mmr_space(src_gpa));
	BUG_ON(len != 8);

	ret = gru_read_gpa(dst_va, src_gpa);
	if (ret == 0)
		return xpSuccess;

	dev_err(xp, "gru_read_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
		"len=%ld\n", dst_gpa, src_gpa, len);
	return xpGruCopyError;
}

/*
 * Copy len bytes between global physical addresses using the GRU;
 * sources in MMR space are dispatched to the 8-byte read path above.
 */
static enum xp_retval
xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
		    size_t len)
{
	int ret;

	if (uv_gpa_in_mmr_space(src_gpa))
		return xp_remote_mmr_read(dst_gpa, src_gpa, len);

	ret = gru_copy_gpa(dst_gpa, src_gpa, len);
	if (ret == 0)
		return xpSuccess;

	dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
		"len=%ld\n", dst_gpa, src_gpa, len);
	return xpGruCopyError;
}

/* Translate a cpu number to its nasid via its pnode. */
static int
xp_cpu_to_nasid_uv(int cpuid)
{
	/* ??? Is this same as sn2 nasid in mach/part bitmaps set up by SAL? */
	return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
}

/*
 * Open up read/write access to the given physical range for other
 * partitions: via the UV BIOS on x86_64, via SAL on ia64.
 */
static enum xp_retval
xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
	if (ret != BIOS_STATUS_SUCCESS) {
		dev_err(xp, "uv_bios_change_memprotect(,, "
			"UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
		return xpBiosError;
	}

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	u64 nasid_array;

	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
				   &nasid_array);
	if (ret != 0) {
		dev_err(xp, "sn_change_memprotect(,, "
			"SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
		return xpSalError;
	}
#else
	#error not a supported configuration
#endif
	return xpSuccess;
}

/*
 * Revoke the access opened by xp_expand_memprotect_uv() — the
 * restrictive counterpart, using the same per-arch mechanism.
 */
static enum xp_retval
xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_change_memprotect(phys_addr, size,
					UV_MEMPROT_RESTRICT_ACCESS);
	if (ret != BIOS_STATUS_SUCCESS) {
		dev_err(xp, "uv_bios_change_memprotect(,, "
			"UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
		return xpBiosError;
	}

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	u64 nasid_array;

	ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
				   &nasid_array);
	if (ret != 0) {
		dev_err(xp, "sn_change_memprotect(,, "
			"SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
		return xpSalError;
	}
#else
	#error not a supported configuration
#endif
	return xpSuccess;
}

/*
 * Install the UV implementations into the xp_* indirection pointers
 * and set the partition limits.  Must only be called on UV hardware.
 */
enum xp_retval
xp_init_uv(void)
{
	BUG_ON(!is_uv());

	xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
	xp_partition_id = sn_partition_id;
	xp_region_size = sn_region_size;

	xp_pa = xp_pa_uv;
	xp_socket_pa = xp_socket_pa_uv;
	xp_remote_memcpy = xp_remote_memcpy_uv;
	xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
	xp_expand_memprotect = xp_expand_memprotect_uv;
	xp_restrict_memprotect = xp_restrict_memprotect_uv;

	return xpSuccess;
}

/* Teardown counterpart of xp_init_uv(); currently only sanity-checks. */
void
xp_exit_uv(void)
{
	BUG_ON(!is_uv());
}
gpl-2.0
houstar/linux-3.8.8
sound/soc/codecs/max98088.c
102
76210
/* * max98088.c -- MAX98088 ALSA SoC Audio driver * * Copyright 2010 Maxim Integrated Products * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include <linux/slab.h> #include <asm/div64.h> #include <sound/max98088.h> #include "max98088.h" enum max98088_type { MAX98088, MAX98089, }; struct max98088_cdata { unsigned int rate; unsigned int fmt; int eq_sel; }; struct max98088_priv { enum max98088_type devtype; struct max98088_pdata *pdata; unsigned int sysclk; struct max98088_cdata dai[2]; int eq_textcnt; const char **eq_texts; struct soc_enum eq_enum; u8 ina_state; u8 inb_state; unsigned int ex_mode; unsigned int digmic; unsigned int mic1pre; unsigned int mic2pre; unsigned int extmic_mode; }; static const u8 max98088_reg[M98088_REG_CNT] = { 0x00, /* 00 IRQ status */ 0x00, /* 01 MIC status */ 0x00, /* 02 jack status */ 0x00, /* 03 battery voltage */ 0x00, /* 04 */ 0x00, /* 05 */ 0x00, /* 06 */ 0x00, /* 07 */ 0x00, /* 08 */ 0x00, /* 09 */ 0x00, /* 0A */ 0x00, /* 0B */ 0x00, /* 0C */ 0x00, /* 0D */ 0x00, /* 0E */ 0x00, /* 0F interrupt enable */ 0x00, /* 10 master clock */ 0x00, /* 11 DAI1 clock mode */ 0x00, /* 12 DAI1 clock control */ 0x00, /* 13 DAI1 clock control */ 0x00, /* 14 DAI1 format */ 0x00, /* 15 DAI1 clock */ 0x00, /* 16 DAI1 config */ 0x00, /* 17 DAI1 TDM */ 0x00, /* 18 DAI1 filters */ 0x00, /* 19 DAI2 clock mode */ 0x00, /* 1A DAI2 clock control */ 0x00, /* 1B DAI2 clock control */ 0x00, /* 1C DAI2 format */ 0x00, /* 1D DAI2 clock */ 0x00, /* 1E DAI2 config */ 0x00, /* 1F DAI2 TDM */ 0x00, /* 20 
DAI2 filters */ 0x00, /* 21 data config */ 0x00, /* 22 DAC mixer */ 0x00, /* 23 left ADC mixer */ 0x00, /* 24 right ADC mixer */ 0x00, /* 25 left HP mixer */ 0x00, /* 26 right HP mixer */ 0x00, /* 27 HP control */ 0x00, /* 28 left REC mixer */ 0x00, /* 29 right REC mixer */ 0x00, /* 2A REC control */ 0x00, /* 2B left SPK mixer */ 0x00, /* 2C right SPK mixer */ 0x00, /* 2D SPK control */ 0x00, /* 2E sidetone */ 0x00, /* 2F DAI1 playback level */ 0x00, /* 30 DAI1 playback level */ 0x00, /* 31 DAI2 playback level */ 0x00, /* 32 DAI2 playbakc level */ 0x00, /* 33 left ADC level */ 0x00, /* 34 right ADC level */ 0x00, /* 35 MIC1 level */ 0x00, /* 36 MIC2 level */ 0x00, /* 37 INA level */ 0x00, /* 38 INB level */ 0x00, /* 39 left HP volume */ 0x00, /* 3A right HP volume */ 0x00, /* 3B left REC volume */ 0x00, /* 3C right REC volume */ 0x00, /* 3D left SPK volume */ 0x00, /* 3E right SPK volume */ 0x00, /* 3F MIC config */ 0x00, /* 40 MIC threshold */ 0x00, /* 41 excursion limiter filter */ 0x00, /* 42 excursion limiter threshold */ 0x00, /* 43 ALC */ 0x00, /* 44 power limiter threshold */ 0x00, /* 45 power limiter config */ 0x00, /* 46 distortion limiter config */ 0x00, /* 47 audio input */ 0x00, /* 48 microphone */ 0x00, /* 49 level control */ 0x00, /* 4A bypass switches */ 0x00, /* 4B jack detect */ 0x00, /* 4C input enable */ 0x00, /* 4D output enable */ 0xF0, /* 4E bias control */ 0x00, /* 4F DAC power */ 0x0F, /* 50 DAC power */ 0x00, /* 51 system */ 0x00, /* 52 DAI1 EQ1 */ 0x00, /* 53 DAI1 EQ1 */ 0x00, /* 54 DAI1 EQ1 */ 0x00, /* 55 DAI1 EQ1 */ 0x00, /* 56 DAI1 EQ1 */ 0x00, /* 57 DAI1 EQ1 */ 0x00, /* 58 DAI1 EQ1 */ 0x00, /* 59 DAI1 EQ1 */ 0x00, /* 5A DAI1 EQ1 */ 0x00, /* 5B DAI1 EQ1 */ 0x00, /* 5C DAI1 EQ2 */ 0x00, /* 5D DAI1 EQ2 */ 0x00, /* 5E DAI1 EQ2 */ 0x00, /* 5F DAI1 EQ2 */ 0x00, /* 60 DAI1 EQ2 */ 0x00, /* 61 DAI1 EQ2 */ 0x00, /* 62 DAI1 EQ2 */ 0x00, /* 63 DAI1 EQ2 */ 0x00, /* 64 DAI1 EQ2 */ 0x00, /* 65 DAI1 EQ2 */ 0x00, /* 66 DAI1 EQ3 */ 0x00, /* 67 DAI1 EQ3 
*/ 0x00, /* 68 DAI1 EQ3 */ 0x00, /* 69 DAI1 EQ3 */ 0x00, /* 6A DAI1 EQ3 */ 0x00, /* 6B DAI1 EQ3 */ 0x00, /* 6C DAI1 EQ3 */ 0x00, /* 6D DAI1 EQ3 */ 0x00, /* 6E DAI1 EQ3 */ 0x00, /* 6F DAI1 EQ3 */ 0x00, /* 70 DAI1 EQ4 */ 0x00, /* 71 DAI1 EQ4 */ 0x00, /* 72 DAI1 EQ4 */ 0x00, /* 73 DAI1 EQ4 */ 0x00, /* 74 DAI1 EQ4 */ 0x00, /* 75 DAI1 EQ4 */ 0x00, /* 76 DAI1 EQ4 */ 0x00, /* 77 DAI1 EQ4 */ 0x00, /* 78 DAI1 EQ4 */ 0x00, /* 79 DAI1 EQ4 */ 0x00, /* 7A DAI1 EQ5 */ 0x00, /* 7B DAI1 EQ5 */ 0x00, /* 7C DAI1 EQ5 */ 0x00, /* 7D DAI1 EQ5 */ 0x00, /* 7E DAI1 EQ5 */ 0x00, /* 7F DAI1 EQ5 */ 0x00, /* 80 DAI1 EQ5 */ 0x00, /* 81 DAI1 EQ5 */ 0x00, /* 82 DAI1 EQ5 */ 0x00, /* 83 DAI1 EQ5 */ 0x00, /* 84 DAI2 EQ1 */ 0x00, /* 85 DAI2 EQ1 */ 0x00, /* 86 DAI2 EQ1 */ 0x00, /* 87 DAI2 EQ1 */ 0x00, /* 88 DAI2 EQ1 */ 0x00, /* 89 DAI2 EQ1 */ 0x00, /* 8A DAI2 EQ1 */ 0x00, /* 8B DAI2 EQ1 */ 0x00, /* 8C DAI2 EQ1 */ 0x00, /* 8D DAI2 EQ1 */ 0x00, /* 8E DAI2 EQ2 */ 0x00, /* 8F DAI2 EQ2 */ 0x00, /* 90 DAI2 EQ2 */ 0x00, /* 91 DAI2 EQ2 */ 0x00, /* 92 DAI2 EQ2 */ 0x00, /* 93 DAI2 EQ2 */ 0x00, /* 94 DAI2 EQ2 */ 0x00, /* 95 DAI2 EQ2 */ 0x00, /* 96 DAI2 EQ2 */ 0x00, /* 97 DAI2 EQ2 */ 0x00, /* 98 DAI2 EQ3 */ 0x00, /* 99 DAI2 EQ3 */ 0x00, /* 9A DAI2 EQ3 */ 0x00, /* 9B DAI2 EQ3 */ 0x00, /* 9C DAI2 EQ3 */ 0x00, /* 9D DAI2 EQ3 */ 0x00, /* 9E DAI2 EQ3 */ 0x00, /* 9F DAI2 EQ3 */ 0x00, /* A0 DAI2 EQ3 */ 0x00, /* A1 DAI2 EQ3 */ 0x00, /* A2 DAI2 EQ4 */ 0x00, /* A3 DAI2 EQ4 */ 0x00, /* A4 DAI2 EQ4 */ 0x00, /* A5 DAI2 EQ4 */ 0x00, /* A6 DAI2 EQ4 */ 0x00, /* A7 DAI2 EQ4 */ 0x00, /* A8 DAI2 EQ4 */ 0x00, /* A9 DAI2 EQ4 */ 0x00, /* AA DAI2 EQ4 */ 0x00, /* AB DAI2 EQ4 */ 0x00, /* AC DAI2 EQ5 */ 0x00, /* AD DAI2 EQ5 */ 0x00, /* AE DAI2 EQ5 */ 0x00, /* AF DAI2 EQ5 */ 0x00, /* B0 DAI2 EQ5 */ 0x00, /* B1 DAI2 EQ5 */ 0x00, /* B2 DAI2 EQ5 */ 0x00, /* B3 DAI2 EQ5 */ 0x00, /* B4 DAI2 EQ5 */ 0x00, /* B5 DAI2 EQ5 */ 0x00, /* B6 DAI1 biquad */ 0x00, /* B7 DAI1 biquad */ 0x00, /* B8 DAI1 biquad */ 0x00, /* B9 DAI1 biquad */ 0x00, /* BA DAI1 
biquad */ 0x00, /* BB DAI1 biquad */ 0x00, /* BC DAI1 biquad */ 0x00, /* BD DAI1 biquad */ 0x00, /* BE DAI1 biquad */ 0x00, /* BF DAI1 biquad */ 0x00, /* C0 DAI2 biquad */ 0x00, /* C1 DAI2 biquad */ 0x00, /* C2 DAI2 biquad */ 0x00, /* C3 DAI2 biquad */ 0x00, /* C4 DAI2 biquad */ 0x00, /* C5 DAI2 biquad */ 0x00, /* C6 DAI2 biquad */ 0x00, /* C7 DAI2 biquad */ 0x00, /* C8 DAI2 biquad */ 0x00, /* C9 DAI2 biquad */ 0x00, /* CA */ 0x00, /* CB */ 0x00, /* CC */ 0x00, /* CD */ 0x00, /* CE */ 0x00, /* CF */ 0x00, /* D0 */ 0x00, /* D1 */ 0x00, /* D2 */ 0x00, /* D3 */ 0x00, /* D4 */ 0x00, /* D5 */ 0x00, /* D6 */ 0x00, /* D7 */ 0x00, /* D8 */ 0x00, /* D9 */ 0x00, /* DA */ 0x70, /* DB */ 0x00, /* DC */ 0x00, /* DD */ 0x00, /* DE */ 0x00, /* DF */ 0x00, /* E0 */ 0x00, /* E1 */ 0x00, /* E2 */ 0x00, /* E3 */ 0x00, /* E4 */ 0x00, /* E5 */ 0x00, /* E6 */ 0x00, /* E7 */ 0x00, /* E8 */ 0x00, /* E9 */ 0x00, /* EA */ 0x00, /* EB */ 0x00, /* EC */ 0x00, /* ED */ 0x00, /* EE */ 0x00, /* EF */ 0x00, /* F0 */ 0x00, /* F1 */ 0x00, /* F2 */ 0x00, /* F3 */ 0x00, /* F4 */ 0x00, /* F5 */ 0x00, /* F6 */ 0x00, /* F7 */ 0x00, /* F8 */ 0x00, /* F9 */ 0x00, /* FA */ 0x00, /* FB */ 0x00, /* FC */ 0x00, /* FD */ 0x00, /* FE */ 0x00, /* FF */ }; static struct { int readable; int writable; int vol; } max98088_access[M98088_REG_CNT] = { { 0xFF, 0xFF, 1 }, /* 00 IRQ status */ { 0xFF, 0x00, 1 }, /* 01 MIC status */ { 0xFF, 0x00, 1 }, /* 02 jack status */ { 0x1F, 0x1F, 1 }, /* 03 battery voltage */ { 0xFF, 0xFF, 0 }, /* 04 */ { 0xFF, 0xFF, 0 }, /* 05 */ { 0xFF, 0xFF, 0 }, /* 06 */ { 0xFF, 0xFF, 0 }, /* 07 */ { 0xFF, 0xFF, 0 }, /* 08 */ { 0xFF, 0xFF, 0 }, /* 09 */ { 0xFF, 0xFF, 0 }, /* 0A */ { 0xFF, 0xFF, 0 }, /* 0B */ { 0xFF, 0xFF, 0 }, /* 0C */ { 0xFF, 0xFF, 0 }, /* 0D */ { 0xFF, 0xFF, 0 }, /* 0E */ { 0xFF, 0xFF, 0 }, /* 0F interrupt enable */ { 0xFF, 0xFF, 0 }, /* 10 master clock */ { 0xFF, 0xFF, 0 }, /* 11 DAI1 clock mode */ { 0xFF, 0xFF, 0 }, /* 12 DAI1 clock control */ { 0xFF, 0xFF, 0 }, /* 13 DAI1 
clock control */ { 0xFF, 0xFF, 0 }, /* 14 DAI1 format */ { 0xFF, 0xFF, 0 }, /* 15 DAI1 clock */ { 0xFF, 0xFF, 0 }, /* 16 DAI1 config */ { 0xFF, 0xFF, 0 }, /* 17 DAI1 TDM */ { 0xFF, 0xFF, 0 }, /* 18 DAI1 filters */ { 0xFF, 0xFF, 0 }, /* 19 DAI2 clock mode */ { 0xFF, 0xFF, 0 }, /* 1A DAI2 clock control */ { 0xFF, 0xFF, 0 }, /* 1B DAI2 clock control */ { 0xFF, 0xFF, 0 }, /* 1C DAI2 format */ { 0xFF, 0xFF, 0 }, /* 1D DAI2 clock */ { 0xFF, 0xFF, 0 }, /* 1E DAI2 config */ { 0xFF, 0xFF, 0 }, /* 1F DAI2 TDM */ { 0xFF, 0xFF, 0 }, /* 20 DAI2 filters */ { 0xFF, 0xFF, 0 }, /* 21 data config */ { 0xFF, 0xFF, 0 }, /* 22 DAC mixer */ { 0xFF, 0xFF, 0 }, /* 23 left ADC mixer */ { 0xFF, 0xFF, 0 }, /* 24 right ADC mixer */ { 0xFF, 0xFF, 0 }, /* 25 left HP mixer */ { 0xFF, 0xFF, 0 }, /* 26 right HP mixer */ { 0xFF, 0xFF, 0 }, /* 27 HP control */ { 0xFF, 0xFF, 0 }, /* 28 left REC mixer */ { 0xFF, 0xFF, 0 }, /* 29 right REC mixer */ { 0xFF, 0xFF, 0 }, /* 2A REC control */ { 0xFF, 0xFF, 0 }, /* 2B left SPK mixer */ { 0xFF, 0xFF, 0 }, /* 2C right SPK mixer */ { 0xFF, 0xFF, 0 }, /* 2D SPK control */ { 0xFF, 0xFF, 0 }, /* 2E sidetone */ { 0xFF, 0xFF, 0 }, /* 2F DAI1 playback level */ { 0xFF, 0xFF, 0 }, /* 30 DAI1 playback level */ { 0xFF, 0xFF, 0 }, /* 31 DAI2 playback level */ { 0xFF, 0xFF, 0 }, /* 32 DAI2 playbakc level */ { 0xFF, 0xFF, 0 }, /* 33 left ADC level */ { 0xFF, 0xFF, 0 }, /* 34 right ADC level */ { 0xFF, 0xFF, 0 }, /* 35 MIC1 level */ { 0xFF, 0xFF, 0 }, /* 36 MIC2 level */ { 0xFF, 0xFF, 0 }, /* 37 INA level */ { 0xFF, 0xFF, 0 }, /* 38 INB level */ { 0xFF, 0xFF, 0 }, /* 39 left HP volume */ { 0xFF, 0xFF, 0 }, /* 3A right HP volume */ { 0xFF, 0xFF, 0 }, /* 3B left REC volume */ { 0xFF, 0xFF, 0 }, /* 3C right REC volume */ { 0xFF, 0xFF, 0 }, /* 3D left SPK volume */ { 0xFF, 0xFF, 0 }, /* 3E right SPK volume */ { 0xFF, 0xFF, 0 }, /* 3F MIC config */ { 0xFF, 0xFF, 0 }, /* 40 MIC threshold */ { 0xFF, 0xFF, 0 }, /* 41 excursion limiter filter */ { 0xFF, 0xFF, 0 }, /* 42 excursion 
limiter threshold */ { 0xFF, 0xFF, 0 }, /* 43 ALC */ { 0xFF, 0xFF, 0 }, /* 44 power limiter threshold */ { 0xFF, 0xFF, 0 }, /* 45 power limiter config */ { 0xFF, 0xFF, 0 }, /* 46 distortion limiter config */ { 0xFF, 0xFF, 0 }, /* 47 audio input */ { 0xFF, 0xFF, 0 }, /* 48 microphone */ { 0xFF, 0xFF, 0 }, /* 49 level control */ { 0xFF, 0xFF, 0 }, /* 4A bypass switches */ { 0xFF, 0xFF, 0 }, /* 4B jack detect */ { 0xFF, 0xFF, 0 }, /* 4C input enable */ { 0xFF, 0xFF, 0 }, /* 4D output enable */ { 0xFF, 0xFF, 0 }, /* 4E bias control */ { 0xFF, 0xFF, 0 }, /* 4F DAC power */ { 0xFF, 0xFF, 0 }, /* 50 DAC power */ { 0xFF, 0xFF, 0 }, /* 51 system */ { 0xFF, 0xFF, 0 }, /* 52 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 53 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 54 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 55 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 56 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 57 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 58 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 59 DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 5A DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 5B DAI1 EQ1 */ { 0xFF, 0xFF, 0 }, /* 5C DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 5D DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 5E DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 5F DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 60 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 61 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 62 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 63 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 64 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 65 DAI1 EQ2 */ { 0xFF, 0xFF, 0 }, /* 66 DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 67 DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 68 DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 69 DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6A DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6B DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6C DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6D DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6E DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 6F DAI1 EQ3 */ { 0xFF, 0xFF, 0 }, /* 70 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 71 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 72 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 73 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 74 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 75 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, 
/* 76 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 77 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 78 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 79 DAI1 EQ4 */ { 0xFF, 0xFF, 0 }, /* 7A DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7B DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7C DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7D DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7E DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 7F DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 80 DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 81 DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 82 DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 83 DAI1 EQ5 */ { 0xFF, 0xFF, 0 }, /* 84 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 85 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 86 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 87 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 88 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 89 DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8A DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8B DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8C DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8D DAI2 EQ1 */ { 0xFF, 0xFF, 0 }, /* 8E DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 8F DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 90 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 91 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 92 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 93 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 94 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 95 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 96 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 97 DAI2 EQ2 */ { 0xFF, 0xFF, 0 }, /* 98 DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 99 DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9A DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9B DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9C DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9D DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9E DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* 9F DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* A0 DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* A1 DAI2 EQ3 */ { 0xFF, 0xFF, 0 }, /* A2 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A3 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A4 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A5 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A6 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A7 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A8 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* A9 DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* AA DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, /* AB DAI2 EQ4 */ { 0xFF, 0xFF, 0 }, 
/* AC DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* AD DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* AE DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* AF DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B0 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B1 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B2 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B3 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B4 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B5 DAI2 EQ5 */ { 0xFF, 0xFF, 0 }, /* B6 DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* B7 DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* B8 DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* B9 DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BA DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BB DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BC DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BD DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BE DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* BF DAI1 biquad */ { 0xFF, 0xFF, 0 }, /* C0 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C1 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C2 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C3 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C4 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C5 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C6 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C7 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C8 DAI2 biquad */ { 0xFF, 0xFF, 0 }, /* C9 DAI2 biquad */ { 0x00, 0x00, 0 }, /* CA */ { 0x00, 0x00, 0 }, /* CB */ { 0x00, 0x00, 0 }, /* CC */ { 0x00, 0x00, 0 }, /* CD */ { 0x00, 0x00, 0 }, /* CE */ { 0x00, 0x00, 0 }, /* CF */ { 0x00, 0x00, 0 }, /* D0 */ { 0x00, 0x00, 0 }, /* D1 */ { 0x00, 0x00, 0 }, /* D2 */ { 0x00, 0x00, 0 }, /* D3 */ { 0x00, 0x00, 0 }, /* D4 */ { 0x00, 0x00, 0 }, /* D5 */ { 0x00, 0x00, 0 }, /* D6 */ { 0x00, 0x00, 0 }, /* D7 */ { 0x00, 0x00, 0 }, /* D8 */ { 0x00, 0x00, 0 }, /* D9 */ { 0x00, 0x00, 0 }, /* DA */ { 0x00, 0x00, 0 }, /* DB */ { 0x00, 0x00, 0 }, /* DC */ { 0x00, 0x00, 0 }, /* DD */ { 0x00, 0x00, 0 }, /* DE */ { 0x00, 0x00, 0 }, /* DF */ { 0x00, 0x00, 0 }, /* E0 */ { 0x00, 0x00, 0 }, /* E1 */ { 0x00, 0x00, 0 }, /* E2 */ { 0x00, 0x00, 0 }, /* E3 */ { 0x00, 0x00, 0 }, /* E4 */ { 0x00, 0x00, 0 }, /* E5 */ { 0x00, 0x00, 0 }, /* E6 */ { 0x00, 0x00, 0 }, /* E7 */ { 0x00, 
0x00, 0 }, /* E8 */ { 0x00, 0x00, 0 }, /* E9 */ { 0x00, 0x00, 0 }, /* EA */ { 0x00, 0x00, 0 }, /* EB */ { 0x00, 0x00, 0 }, /* EC */ { 0x00, 0x00, 0 }, /* ED */ { 0x00, 0x00, 0 }, /* EE */ { 0x00, 0x00, 0 }, /* EF */ { 0x00, 0x00, 0 }, /* F0 */ { 0x00, 0x00, 0 }, /* F1 */ { 0x00, 0x00, 0 }, /* F2 */ { 0x00, 0x00, 0 }, /* F3 */ { 0x00, 0x00, 0 }, /* F4 */ { 0x00, 0x00, 0 }, /* F5 */ { 0x00, 0x00, 0 }, /* F6 */ { 0x00, 0x00, 0 }, /* F7 */ { 0x00, 0x00, 0 }, /* F8 */ { 0x00, 0x00, 0 }, /* F9 */ { 0x00, 0x00, 0 }, /* FA */ { 0x00, 0x00, 0 }, /* FB */ { 0x00, 0x00, 0 }, /* FC */ { 0x00, 0x00, 0 }, /* FD */ { 0x00, 0x00, 0 }, /* FE */ { 0xFF, 0x00, 1 }, /* FF */ }; static int max98088_volatile_register(struct snd_soc_codec *codec, unsigned int reg) { return max98088_access[reg].vol; } /* * Load equalizer DSP coefficient configurations registers */ static void m98088_eq_band(struct snd_soc_codec *codec, unsigned int dai, unsigned int band, u16 *coefs) { unsigned int eq_reg; unsigned int i; BUG_ON(band > 4); BUG_ON(dai > 1); /* Load the base register address */ eq_reg = dai ? 
M98088_REG_84_DAI2_EQ_BASE : M98088_REG_52_DAI1_EQ_BASE; /* Add the band address offset, note adjustment for word address */ eq_reg += band * (M98088_COEFS_PER_BAND << 1); /* Step through the registers and coefs */ for (i = 0; i < M98088_COEFS_PER_BAND; i++) { snd_soc_write(codec, eq_reg++, M98088_BYTE1(coefs[i])); snd_soc_write(codec, eq_reg++, M98088_BYTE0(coefs[i])); } } /* * Excursion limiter modes */ static const char *max98088_exmode_texts[] = { "Off", "100Hz", "400Hz", "600Hz", "800Hz", "1000Hz", "200-400Hz", "400-600Hz", "400-800Hz", }; static const unsigned int max98088_exmode_values[] = { 0x00, 0x43, 0x10, 0x20, 0x30, 0x40, 0x11, 0x22, 0x32 }; static const struct soc_enum max98088_exmode_enum = SOC_VALUE_ENUM_SINGLE(M98088_REG_41_SPKDHP, 0, 127, ARRAY_SIZE(max98088_exmode_texts), max98088_exmode_texts, max98088_exmode_values); static const char *max98088_ex_thresh[] = { /* volts PP */ "0.6", "1.2", "1.8", "2.4", "3.0", "3.6", "4.2", "4.8"}; static const struct soc_enum max98088_ex_thresh_enum[] = { SOC_ENUM_SINGLE(M98088_REG_42_SPKDHP_THRESH, 0, 8, max98088_ex_thresh), }; static const char *max98088_fltr_mode[] = {"Voice", "Music" }; static const struct soc_enum max98088_filter_mode_enum[] = { SOC_ENUM_SINGLE(M98088_REG_18_DAI1_FILTERS, 7, 2, max98088_fltr_mode), }; static const char *max98088_extmic_text[] = { "None", "MIC1", "MIC2" }; static const struct soc_enum max98088_extmic_enum = SOC_ENUM_SINGLE(M98088_REG_48_CFG_MIC, 0, 3, max98088_extmic_text); static const struct snd_kcontrol_new max98088_extmic_mux = SOC_DAPM_ENUM("External MIC Mux", max98088_extmic_enum); static const char *max98088_dai1_fltr[] = { "Off", "fc=258/fs=16k", "fc=500/fs=16k", "fc=258/fs=8k", "fc=500/fs=8k", "fc=200"}; static const struct soc_enum max98088_dai1_dac_filter_enum[] = { SOC_ENUM_SINGLE(M98088_REG_18_DAI1_FILTERS, 0, 6, max98088_dai1_fltr), }; static const struct soc_enum max98088_dai1_adc_filter_enum[] = { SOC_ENUM_SINGLE(M98088_REG_18_DAI1_FILTERS, 4, 6, 
max98088_dai1_fltr), }; static int max98088_mic1pre_set(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); unsigned int sel = ucontrol->value.integer.value[0]; max98088->mic1pre = sel; snd_soc_update_bits(codec, M98088_REG_35_LVL_MIC1, M98088_MICPRE_MASK, (1+sel)<<M98088_MICPRE_SHIFT); return 0; } static int max98088_mic1pre_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); ucontrol->value.integer.value[0] = max98088->mic1pre; return 0; } static int max98088_mic2pre_set(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); unsigned int sel = ucontrol->value.integer.value[0]; max98088->mic2pre = sel; snd_soc_update_bits(codec, M98088_REG_36_LVL_MIC2, M98088_MICPRE_MASK, (1+sel)<<M98088_MICPRE_SHIFT); return 0; } static int max98088_mic2pre_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); ucontrol->value.integer.value[0] = max98088->mic2pre; return 0; } static const unsigned int max98088_micboost_tlv[] = { TLV_DB_RANGE_HEAD(2), 0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0), 2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0), }; static const struct snd_kcontrol_new max98088_snd_controls[] = { SOC_DOUBLE_R("Headphone Volume", M98088_REG_39_LVL_HP_L, M98088_REG_3A_LVL_HP_R, 0, 31, 0), SOC_DOUBLE_R("Speaker Volume", M98088_REG_3D_LVL_SPK_L, M98088_REG_3E_LVL_SPK_R, 0, 31, 0), SOC_DOUBLE_R("Receiver Volume", M98088_REG_3B_LVL_REC_L, M98088_REG_3C_LVL_REC_R, 0, 31, 0), SOC_DOUBLE_R("Headphone Switch", 
M98088_REG_39_LVL_HP_L, M98088_REG_3A_LVL_HP_R, 7, 1, 1), SOC_DOUBLE_R("Speaker Switch", M98088_REG_3D_LVL_SPK_L, M98088_REG_3E_LVL_SPK_R, 7, 1, 1), SOC_DOUBLE_R("Receiver Switch", M98088_REG_3B_LVL_REC_L, M98088_REG_3C_LVL_REC_R, 7, 1, 1), SOC_SINGLE("MIC1 Volume", M98088_REG_35_LVL_MIC1, 0, 31, 1), SOC_SINGLE("MIC2 Volume", M98088_REG_36_LVL_MIC2, 0, 31, 1), SOC_SINGLE_EXT_TLV("MIC1 Boost Volume", M98088_REG_35_LVL_MIC1, 5, 2, 0, max98088_mic1pre_get, max98088_mic1pre_set, max98088_micboost_tlv), SOC_SINGLE_EXT_TLV("MIC2 Boost Volume", M98088_REG_36_LVL_MIC2, 5, 2, 0, max98088_mic2pre_get, max98088_mic2pre_set, max98088_micboost_tlv), SOC_SINGLE("INA Volume", M98088_REG_37_LVL_INA, 0, 7, 1), SOC_SINGLE("INB Volume", M98088_REG_38_LVL_INB, 0, 7, 1), SOC_SINGLE("ADCL Volume", M98088_REG_33_LVL_ADC_L, 0, 15, 0), SOC_SINGLE("ADCR Volume", M98088_REG_34_LVL_ADC_R, 0, 15, 0), SOC_SINGLE("ADCL Boost Volume", M98088_REG_33_LVL_ADC_L, 4, 3, 0), SOC_SINGLE("ADCR Boost Volume", M98088_REG_34_LVL_ADC_R, 4, 3, 0), SOC_SINGLE("EQ1 Switch", M98088_REG_49_CFG_LEVEL, 0, 1, 0), SOC_SINGLE("EQ2 Switch", M98088_REG_49_CFG_LEVEL, 1, 1, 0), SOC_ENUM("EX Limiter Mode", max98088_exmode_enum), SOC_ENUM("EX Limiter Threshold", max98088_ex_thresh_enum), SOC_ENUM("DAI1 Filter Mode", max98088_filter_mode_enum), SOC_ENUM("DAI1 DAC Filter", max98088_dai1_dac_filter_enum), SOC_ENUM("DAI1 ADC Filter", max98088_dai1_adc_filter_enum), SOC_SINGLE("DAI2 DC Block Switch", M98088_REG_20_DAI2_FILTERS, 0, 1, 0), SOC_SINGLE("ALC Switch", M98088_REG_43_SPKALC_COMP, 7, 1, 0), SOC_SINGLE("ALC Threshold", M98088_REG_43_SPKALC_COMP, 0, 7, 0), SOC_SINGLE("ALC Multiband", M98088_REG_43_SPKALC_COMP, 3, 1, 0), SOC_SINGLE("ALC Release Time", M98088_REG_43_SPKALC_COMP, 4, 7, 0), SOC_SINGLE("PWR Limiter Threshold", M98088_REG_44_PWRLMT_CFG, 4, 15, 0), SOC_SINGLE("PWR Limiter Weight", M98088_REG_44_PWRLMT_CFG, 0, 7, 0), SOC_SINGLE("PWR Limiter Time1", M98088_REG_45_PWRLMT_TIME, 0, 15, 0), SOC_SINGLE("PWR Limiter 
Time2", M98088_REG_45_PWRLMT_TIME, 4, 15, 0), SOC_SINGLE("THD Limiter Threshold", M98088_REG_46_THDLMT_CFG, 4, 15, 0), SOC_SINGLE("THD Limiter Time", M98088_REG_46_THDLMT_CFG, 0, 7, 0), }; /* Left speaker mixer switch */ static const struct snd_kcontrol_new max98088_left_speaker_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 0, 1, 0), SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 0, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 7, 1, 0), SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 5, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 1, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_2B_MIX_SPK_LEFT, 3, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_2B_MIX_SPK_LEFT, 4, 1, 0), }; /* Right speaker mixer switch */ static const struct snd_kcontrol_new max98088_right_speaker_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 0, 1, 0), SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 7, 1, 0), SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 0, 1, 0), SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 5, 1, 0), SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 6, 1, 0), SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 1, 1, 0), SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 2, 1, 0), SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 3, 1, 0), SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_2C_MIX_SPK_RIGHT, 4, 1, 0), }; /* Left headphone mixer switch */ static const struct snd_kcontrol_new max98088_left_hp_mixer_controls[] = { SOC_DAPM_SINGLE("Left DAC1 Switch", 
M98088_REG_25_MIX_HP_LEFT, 0, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_25_MIX_HP_LEFT, 7, 1, 0),
	SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_25_MIX_HP_LEFT, 0, 1, 0),
	SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_25_MIX_HP_LEFT, 7, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_25_MIX_HP_LEFT, 5, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_25_MIX_HP_LEFT, 6, 1, 0),
	SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_25_MIX_HP_LEFT, 1, 1, 0),
	SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_25_MIX_HP_LEFT, 2, 1, 0),
	SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_25_MIX_HP_LEFT, 3, 1, 0),
	SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_25_MIX_HP_LEFT, 4, 1, 0),
};

/* Right headphone mixer switch */
static const struct snd_kcontrol_new max98088_right_hp_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_26_MIX_HP_RIGHT, 7, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_26_MIX_HP_RIGHT, 0, 1, 0),
	SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_26_MIX_HP_RIGHT, 7, 1, 0),
	SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_26_MIX_HP_RIGHT, 0, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_26_MIX_HP_RIGHT, 5, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_26_MIX_HP_RIGHT, 6, 1, 0),
	SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_26_MIX_HP_RIGHT, 1, 1, 0),
	SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_26_MIX_HP_RIGHT, 2, 1, 0),
	SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_26_MIX_HP_RIGHT, 3, 1, 0),
	SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_26_MIX_HP_RIGHT, 4, 1, 0),
};

/* Left earpiece/receiver mixer switch */
static const struct snd_kcontrol_new max98088_left_rec_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_28_MIX_REC_LEFT, 0, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_28_MIX_REC_LEFT, 7, 1, 0),
	SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_28_MIX_REC_LEFT, 0, 1, 0),
	SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_28_MIX_REC_LEFT, 7, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_28_MIX_REC_LEFT, 5, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_28_MIX_REC_LEFT, 6, 1, 0),
	SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_28_MIX_REC_LEFT, 1, 1, 0),
	SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_28_MIX_REC_LEFT, 2, 1, 0),
	SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_28_MIX_REC_LEFT, 3, 1, 0),
	SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_28_MIX_REC_LEFT, 4, 1, 0),
};

/* Right earpiece/receiver mixer switch */
static const struct snd_kcontrol_new max98088_right_rec_mixer_controls[] = {
	SOC_DAPM_SINGLE("Left DAC1 Switch", M98088_REG_29_MIX_REC_RIGHT, 7, 1, 0),
	SOC_DAPM_SINGLE("Right DAC1 Switch", M98088_REG_29_MIX_REC_RIGHT, 0, 1, 0),
	SOC_DAPM_SINGLE("Left DAC2 Switch", M98088_REG_29_MIX_REC_RIGHT, 7, 1, 0),
	SOC_DAPM_SINGLE("Right DAC2 Switch", M98088_REG_29_MIX_REC_RIGHT, 0, 1, 0),
	SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_29_MIX_REC_RIGHT, 5, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_29_MIX_REC_RIGHT, 6, 1, 0),
	SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_29_MIX_REC_RIGHT, 1, 1, 0),
	SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_29_MIX_REC_RIGHT, 2, 1, 0),
	SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_29_MIX_REC_RIGHT, 3, 1, 0),
	SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_29_MIX_REC_RIGHT, 4, 1, 0),
};

/* Left ADC mixer switch */
static const struct snd_kcontrol_new max98088_left_ADC_mixer_controls[] = {
	SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_23_MIX_ADC_LEFT, 7, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_23_MIX_ADC_LEFT, 6, 1, 0),
	SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_23_MIX_ADC_LEFT, 3, 1, 0),
	SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_23_MIX_ADC_LEFT, 2, 1, 0),
	SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_23_MIX_ADC_LEFT, 1, 1, 0),
	SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_23_MIX_ADC_LEFT, 0, 1, 0),
};

/* Right ADC mixer switch */
static const struct snd_kcontrol_new max98088_right_ADC_mixer_controls[] = {
	SOC_DAPM_SINGLE("MIC1 Switch", M98088_REG_24_MIX_ADC_RIGHT, 7, 1, 0),
	SOC_DAPM_SINGLE("MIC2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 6, 1, 0),
	SOC_DAPM_SINGLE("INA1 Switch", M98088_REG_24_MIX_ADC_RIGHT, 3, 1, 0),
	SOC_DAPM_SINGLE("INA2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 2, 1, 0),
	SOC_DAPM_SINGLE("INB1 Switch", M98088_REG_24_MIX_ADC_RIGHT, 1, 1, 0),
	SOC_DAPM_SINGLE("INB2 Switch", M98088_REG_24_MIX_ADC_RIGHT, 0, 1, 0),
};

/*
 * DAPM event for the MIC1/MIC2 input PGAs: on power-up, re-apply the
 * cached preamp boost (hardware encodes step + 1) to the widget's
 * level register; on power-down, clear the MICPRE field.
 */
static int max98088_mic_event(struct snd_soc_dapm_widget *w,
			     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = w->codec;
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* w->reg distinguishes MIC1 from MIC2 (widget reg field) */
		if (w->reg == M98088_REG_35_LVL_MIC1) {
			snd_soc_update_bits(codec, w->reg, M98088_MICPRE_MASK,
				(1+max98088->mic1pre)<<M98088_MICPRE_SHIFT);
		} else {
			snd_soc_update_bits(codec, w->reg, M98088_MICPRE_MASK,
				(1+max98088->mic2pre)<<M98088_MICPRE_SHIFT);
		}
		break;
	case SND_SOC_DAPM_POST_PMD:
		snd_soc_update_bits(codec, w->reg, M98088_MICPRE_MASK, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * The line inputs are 2-channel stereo inputs with the left
 * and right channels sharing a common PGA power control signal.
*/ static int max98088_line_pga(struct snd_soc_dapm_widget *w, int event, int line, u8 channel) { struct snd_soc_codec *codec = w->codec; struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); u8 *state; BUG_ON(!((channel == 1) || (channel == 2))); switch (line) { case LINE_INA: state = &max98088->ina_state; break; case LINE_INB: state = &max98088->inb_state; break; default: return -EINVAL; } switch (event) { case SND_SOC_DAPM_POST_PMU: *state |= channel; snd_soc_update_bits(codec, w->reg, (1 << w->shift), (1 << w->shift)); break; case SND_SOC_DAPM_POST_PMD: *state &= ~channel; if (*state == 0) { snd_soc_update_bits(codec, w->reg, (1 << w->shift), 0); } break; default: return -EINVAL; } return 0; } static int max98088_pga_ina1_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { return max98088_line_pga(w, event, LINE_INA, 1); } static int max98088_pga_ina2_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { return max98088_line_pga(w, event, LINE_INA, 2); } static int max98088_pga_inb1_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { return max98088_line_pga(w, event, LINE_INB, 1); } static int max98088_pga_inb2_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { return max98088_line_pga(w, event, LINE_INB, 2); } static const struct snd_soc_dapm_widget max98088_dapm_widgets[] = { SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", M98088_REG_4C_PWR_EN_IN, 1, 0), SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", M98088_REG_4C_PWR_EN_IN, 0, 0), SND_SOC_DAPM_DAC("DACL1", "HiFi Playback", M98088_REG_4D_PWR_EN_OUT, 1, 0), SND_SOC_DAPM_DAC("DACR1", "HiFi Playback", M98088_REG_4D_PWR_EN_OUT, 0, 0), SND_SOC_DAPM_DAC("DACL2", "Aux Playback", M98088_REG_4D_PWR_EN_OUT, 1, 0), SND_SOC_DAPM_DAC("DACR2", "Aux Playback", M98088_REG_4D_PWR_EN_OUT, 0, 0), SND_SOC_DAPM_PGA("HP Left Out", M98088_REG_4D_PWR_EN_OUT, 7, 0, NULL, 0), SND_SOC_DAPM_PGA("HP Right Out", M98088_REG_4D_PWR_EN_OUT, 6, 0, NULL, 
0), SND_SOC_DAPM_PGA("SPK Left Out", M98088_REG_4D_PWR_EN_OUT, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("SPK Right Out", M98088_REG_4D_PWR_EN_OUT, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("REC Left Out", M98088_REG_4D_PWR_EN_OUT, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("REC Right Out", M98088_REG_4D_PWR_EN_OUT, 2, 0, NULL, 0), SND_SOC_DAPM_MUX("External MIC", SND_SOC_NOPM, 0, 0, &max98088_extmic_mux), SND_SOC_DAPM_MIXER("Left HP Mixer", SND_SOC_NOPM, 0, 0, &max98088_left_hp_mixer_controls[0], ARRAY_SIZE(max98088_left_hp_mixer_controls)), SND_SOC_DAPM_MIXER("Right HP Mixer", SND_SOC_NOPM, 0, 0, &max98088_right_hp_mixer_controls[0], ARRAY_SIZE(max98088_right_hp_mixer_controls)), SND_SOC_DAPM_MIXER("Left SPK Mixer", SND_SOC_NOPM, 0, 0, &max98088_left_speaker_mixer_controls[0], ARRAY_SIZE(max98088_left_speaker_mixer_controls)), SND_SOC_DAPM_MIXER("Right SPK Mixer", SND_SOC_NOPM, 0, 0, &max98088_right_speaker_mixer_controls[0], ARRAY_SIZE(max98088_right_speaker_mixer_controls)), SND_SOC_DAPM_MIXER("Left REC Mixer", SND_SOC_NOPM, 0, 0, &max98088_left_rec_mixer_controls[0], ARRAY_SIZE(max98088_left_rec_mixer_controls)), SND_SOC_DAPM_MIXER("Right REC Mixer", SND_SOC_NOPM, 0, 0, &max98088_right_rec_mixer_controls[0], ARRAY_SIZE(max98088_right_rec_mixer_controls)), SND_SOC_DAPM_MIXER("Left ADC Mixer", SND_SOC_NOPM, 0, 0, &max98088_left_ADC_mixer_controls[0], ARRAY_SIZE(max98088_left_ADC_mixer_controls)), SND_SOC_DAPM_MIXER("Right ADC Mixer", SND_SOC_NOPM, 0, 0, &max98088_right_ADC_mixer_controls[0], ARRAY_SIZE(max98088_right_ADC_mixer_controls)), SND_SOC_DAPM_PGA_E("MIC1 Input", M98088_REG_35_LVL_MIC1, 5, 0, NULL, 0, max98088_mic_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("MIC2 Input", M98088_REG_36_LVL_MIC2, 5, 0, NULL, 0, max98088_mic_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("INA1 Input", M98088_REG_4C_PWR_EN_IN, 7, 0, NULL, 0, max98088_pga_ina1_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("INA2 Input", 
M98088_REG_4C_PWR_EN_IN, 7, 0, NULL, 0, max98088_pga_ina2_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("INB1 Input", M98088_REG_4C_PWR_EN_IN, 6, 0, NULL, 0, max98088_pga_inb1_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("INB2 Input", M98088_REG_4C_PWR_EN_IN, 6, 0, NULL, 0, max98088_pga_inb2_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_MICBIAS("MICBIAS", M98088_REG_4C_PWR_EN_IN, 3, 0), SND_SOC_DAPM_OUTPUT("HPL"), SND_SOC_DAPM_OUTPUT("HPR"), SND_SOC_DAPM_OUTPUT("SPKL"), SND_SOC_DAPM_OUTPUT("SPKR"), SND_SOC_DAPM_OUTPUT("RECL"), SND_SOC_DAPM_OUTPUT("RECR"), SND_SOC_DAPM_INPUT("MIC1"), SND_SOC_DAPM_INPUT("MIC2"), SND_SOC_DAPM_INPUT("INA1"), SND_SOC_DAPM_INPUT("INA2"), SND_SOC_DAPM_INPUT("INB1"), SND_SOC_DAPM_INPUT("INB2"), }; static const struct snd_soc_dapm_route max98088_audio_map[] = { /* Left headphone output mixer */ {"Left HP Mixer", "Left DAC1 Switch", "DACL1"}, {"Left HP Mixer", "Left DAC2 Switch", "DACL2"}, {"Left HP Mixer", "Right DAC1 Switch", "DACR1"}, {"Left HP Mixer", "Right DAC2 Switch", "DACR2"}, {"Left HP Mixer", "MIC1 Switch", "MIC1 Input"}, {"Left HP Mixer", "MIC2 Switch", "MIC2 Input"}, {"Left HP Mixer", "INA1 Switch", "INA1 Input"}, {"Left HP Mixer", "INA2 Switch", "INA2 Input"}, {"Left HP Mixer", "INB1 Switch", "INB1 Input"}, {"Left HP Mixer", "INB2 Switch", "INB2 Input"}, /* Right headphone output mixer */ {"Right HP Mixer", "Left DAC1 Switch", "DACL1"}, {"Right HP Mixer", "Left DAC2 Switch", "DACL2" }, {"Right HP Mixer", "Right DAC1 Switch", "DACR1"}, {"Right HP Mixer", "Right DAC2 Switch", "DACR2"}, {"Right HP Mixer", "MIC1 Switch", "MIC1 Input"}, {"Right HP Mixer", "MIC2 Switch", "MIC2 Input"}, {"Right HP Mixer", "INA1 Switch", "INA1 Input"}, {"Right HP Mixer", "INA2 Switch", "INA2 Input"}, {"Right HP Mixer", "INB1 Switch", "INB1 Input"}, {"Right HP Mixer", "INB2 Switch", "INB2 Input"}, /* Left speaker output mixer */ {"Left SPK Mixer", "Left DAC1 Switch", 
"DACL1"}, {"Left SPK Mixer", "Left DAC2 Switch", "DACL2"}, {"Left SPK Mixer", "Right DAC1 Switch", "DACR1"}, {"Left SPK Mixer", "Right DAC2 Switch", "DACR2"}, {"Left SPK Mixer", "MIC1 Switch", "MIC1 Input"}, {"Left SPK Mixer", "MIC2 Switch", "MIC2 Input"}, {"Left SPK Mixer", "INA1 Switch", "INA1 Input"}, {"Left SPK Mixer", "INA2 Switch", "INA2 Input"}, {"Left SPK Mixer", "INB1 Switch", "INB1 Input"}, {"Left SPK Mixer", "INB2 Switch", "INB2 Input"}, /* Right speaker output mixer */ {"Right SPK Mixer", "Left DAC1 Switch", "DACL1"}, {"Right SPK Mixer", "Left DAC2 Switch", "DACL2"}, {"Right SPK Mixer", "Right DAC1 Switch", "DACR1"}, {"Right SPK Mixer", "Right DAC2 Switch", "DACR2"}, {"Right SPK Mixer", "MIC1 Switch", "MIC1 Input"}, {"Right SPK Mixer", "MIC2 Switch", "MIC2 Input"}, {"Right SPK Mixer", "INA1 Switch", "INA1 Input"}, {"Right SPK Mixer", "INA2 Switch", "INA2 Input"}, {"Right SPK Mixer", "INB1 Switch", "INB1 Input"}, {"Right SPK Mixer", "INB2 Switch", "INB2 Input"}, /* Earpiece/Receiver output mixer */ {"Left REC Mixer", "Left DAC1 Switch", "DACL1"}, {"Left REC Mixer", "Left DAC2 Switch", "DACL2"}, {"Left REC Mixer", "Right DAC1 Switch", "DACR1"}, {"Left REC Mixer", "Right DAC2 Switch", "DACR2"}, {"Left REC Mixer", "MIC1 Switch", "MIC1 Input"}, {"Left REC Mixer", "MIC2 Switch", "MIC2 Input"}, {"Left REC Mixer", "INA1 Switch", "INA1 Input"}, {"Left REC Mixer", "INA2 Switch", "INA2 Input"}, {"Left REC Mixer", "INB1 Switch", "INB1 Input"}, {"Left REC Mixer", "INB2 Switch", "INB2 Input"}, /* Earpiece/Receiver output mixer */ {"Right REC Mixer", "Left DAC1 Switch", "DACL1"}, {"Right REC Mixer", "Left DAC2 Switch", "DACL2"}, {"Right REC Mixer", "Right DAC1 Switch", "DACR1"}, {"Right REC Mixer", "Right DAC2 Switch", "DACR2"}, {"Right REC Mixer", "MIC1 Switch", "MIC1 Input"}, {"Right REC Mixer", "MIC2 Switch", "MIC2 Input"}, {"Right REC Mixer", "INA1 Switch", "INA1 Input"}, {"Right REC Mixer", "INA2 Switch", "INA2 Input"}, {"Right REC Mixer", "INB1 Switch", "INB1 
Input"}, {"Right REC Mixer", "INB2 Switch", "INB2 Input"}, {"HP Left Out", NULL, "Left HP Mixer"}, {"HP Right Out", NULL, "Right HP Mixer"}, {"SPK Left Out", NULL, "Left SPK Mixer"}, {"SPK Right Out", NULL, "Right SPK Mixer"}, {"REC Left Out", NULL, "Left REC Mixer"}, {"REC Right Out", NULL, "Right REC Mixer"}, {"HPL", NULL, "HP Left Out"}, {"HPR", NULL, "HP Right Out"}, {"SPKL", NULL, "SPK Left Out"}, {"SPKR", NULL, "SPK Right Out"}, {"RECL", NULL, "REC Left Out"}, {"RECR", NULL, "REC Right Out"}, /* Left ADC input mixer */ {"Left ADC Mixer", "MIC1 Switch", "MIC1 Input"}, {"Left ADC Mixer", "MIC2 Switch", "MIC2 Input"}, {"Left ADC Mixer", "INA1 Switch", "INA1 Input"}, {"Left ADC Mixer", "INA2 Switch", "INA2 Input"}, {"Left ADC Mixer", "INB1 Switch", "INB1 Input"}, {"Left ADC Mixer", "INB2 Switch", "INB2 Input"}, /* Right ADC input mixer */ {"Right ADC Mixer", "MIC1 Switch", "MIC1 Input"}, {"Right ADC Mixer", "MIC2 Switch", "MIC2 Input"}, {"Right ADC Mixer", "INA1 Switch", "INA1 Input"}, {"Right ADC Mixer", "INA2 Switch", "INA2 Input"}, {"Right ADC Mixer", "INB1 Switch", "INB1 Input"}, {"Right ADC Mixer", "INB2 Switch", "INB2 Input"}, /* Inputs */ {"ADCL", NULL, "Left ADC Mixer"}, {"ADCR", NULL, "Right ADC Mixer"}, {"INA1 Input", NULL, "INA1"}, {"INA2 Input", NULL, "INA2"}, {"INB1 Input", NULL, "INB1"}, {"INB2 Input", NULL, "INB2"}, {"MIC1 Input", NULL, "MIC1"}, {"MIC2 Input", NULL, "MIC2"}, }; /* codec mclk clock divider coefficients */ static const struct { u32 rate; u8 sr; } rate_table[] = { {8000, 0x10}, {11025, 0x20}, {16000, 0x30}, {22050, 0x40}, {24000, 0x50}, {32000, 0x60}, {44100, 0x70}, {48000, 0x80}, {88200, 0x90}, {96000, 0xA0}, }; static inline int rate_value(int rate, u8 *value) { int i; for (i = 0; i < ARRAY_SIZE(rate_table); i++) { if (rate_table[i].rate >= rate) { *value = rate_table[i].sr; return 0; } } *value = rate_table[0].sr; return -EINVAL; } static int max98088_dai1_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params 
*params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_cdata *cdata; unsigned long long ni; unsigned int rate; u8 regval; cdata = &max98088->dai[0]; rate = params_rate(params); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: snd_soc_update_bits(codec, M98088_REG_14_DAI1_FORMAT, M98088_DAI_WS, 0); break; case SNDRV_PCM_FORMAT_S24_LE: snd_soc_update_bits(codec, M98088_REG_14_DAI1_FORMAT, M98088_DAI_WS, M98088_DAI_WS); break; default: return -EINVAL; } snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, 0); if (rate_value(rate, &regval)) return -EINVAL; snd_soc_update_bits(codec, M98088_REG_11_DAI1_CLKMODE, M98088_CLKMODE_MASK, regval); cdata->rate = rate; /* Configure NI when operating as master */ if (snd_soc_read(codec, M98088_REG_14_DAI1_FORMAT) & M98088_DAI_MAS) { if (max98088->sysclk == 0) { dev_err(codec->dev, "Invalid system clock frequency\n"); return -EINVAL; } ni = 65536ULL * (rate < 50000 ? 
96ULL : 48ULL) * (unsigned long long int)rate; do_div(ni, (unsigned long long int)max98088->sysclk); snd_soc_write(codec, M98088_REG_12_DAI1_CLKCFG_HI, (ni >> 8) & 0x7F); snd_soc_write(codec, M98088_REG_13_DAI1_CLKCFG_LO, ni & 0xFF); } /* Update sample rate mode */ if (rate < 50000) snd_soc_update_bits(codec, M98088_REG_18_DAI1_FILTERS, M98088_DAI_DHF, 0); else snd_soc_update_bits(codec, M98088_REG_18_DAI1_FILTERS, M98088_DAI_DHF, M98088_DAI_DHF); snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, M98088_SHDNRUN); return 0; } static int max98088_dai2_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_cdata *cdata; unsigned long long ni; unsigned int rate; u8 regval; cdata = &max98088->dai[1]; rate = params_rate(params); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: snd_soc_update_bits(codec, M98088_REG_1C_DAI2_FORMAT, M98088_DAI_WS, 0); break; case SNDRV_PCM_FORMAT_S24_LE: snd_soc_update_bits(codec, M98088_REG_1C_DAI2_FORMAT, M98088_DAI_WS, M98088_DAI_WS); break; default: return -EINVAL; } snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN, 0); if (rate_value(rate, &regval)) return -EINVAL; snd_soc_update_bits(codec, M98088_REG_19_DAI2_CLKMODE, M98088_CLKMODE_MASK, regval); cdata->rate = rate; /* Configure NI when operating as master */ if (snd_soc_read(codec, M98088_REG_1C_DAI2_FORMAT) & M98088_DAI_MAS) { if (max98088->sysclk == 0) { dev_err(codec->dev, "Invalid system clock frequency\n"); return -EINVAL; } ni = 65536ULL * (rate < 50000 ? 
96ULL : 48ULL)
			* (unsigned long long int)rate;
		do_div(ni, (unsigned long long int)max98088->sysclk);
		snd_soc_write(codec, M98088_REG_1A_DAI2_CLKCFG_HI,
			(ni >> 8) & 0x7F);
		snd_soc_write(codec, M98088_REG_1B_DAI2_CLKCFG_LO,
			ni & 0xFF);
	}

	/* Update sample rate mode */
	if (rate < 50000)
		snd_soc_update_bits(codec, M98088_REG_20_DAI2_FILTERS,
			M98088_DAI_DHF, 0);
	else
		snd_soc_update_bits(codec, M98088_REG_20_DAI2_FILTERS,
			M98088_DAI_DHF, M98088_DAI_DHF);

	snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS, M98088_SHDNRUN,
		M98088_SHDNRUN);

	return 0;
}

/*
 * Record the master clock frequency and program the prescaler range
 * bits.  Shared by both DAIs.  If the codec is already running,
 * SHDNRUN is toggled so the new clock setting takes effect.
 */
static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
				   int clk_id, unsigned int freq, int dir)
{
	struct snd_soc_codec *codec = dai->codec;
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);

	/* Requested clock frequency is already setup */
	if (freq == max98088->sysclk)
		return 0;

	/* Setup clocks for slave mode, and using the PLL
	 * PSCLK = 0x01 (when master clk is 10MHz to 20MHz)
	 *         0x02 (when master clk is 20MHz to 30MHz)..
	 */
	if ((freq >= 10000000) && (freq < 20000000)) {
		snd_soc_write(codec, M98088_REG_10_SYS_CLK, 0x10);
	} else if ((freq >= 20000000) && (freq < 30000000)) {
		snd_soc_write(codec, M98088_REG_10_SYS_CLK, 0x20);
	} else {
		dev_err(codec->dev, "Invalid master clock frequency\n");
		return -EINVAL;
	}

	/* Restart the codec if it was running so the change is latched */
	if (snd_soc_read(codec, M98088_REG_51_PWR_SYS) & M98088_SHDNRUN) {
		snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS,
			M98088_SHDNRUN, 0);
		snd_soc_update_bits(codec, M98088_REG_51_PWR_SYS,
			M98088_SHDNRUN, M98088_SHDNRUN);
	}

	dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);

	max98088->sysclk = freq;
	return 0;
}

/*
 * Configure the DAI1 bus format (master/slave, I2S/left-justified,
 * clock inversion).  Only touches hardware when fmt changes.
 */
static int max98088_dai1_set_fmt(struct snd_soc_dai *codec_dai,
				 unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	struct max98088_cdata *cdata;
	u8 reg15val;
	u8 reg14val = 0;

	cdata = &max98088->dai[0];

	if (fmt != cdata->fmt) {
		cdata->fmt = fmt;

		switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
		case SND_SOC_DAIFMT_CBS_CFS:
			/* Slave mode PLL */
			snd_soc_write(codec, M98088_REG_12_DAI1_CLKCFG_HI,
				0x80);
			snd_soc_write(codec, M98088_REG_13_DAI1_CLKCFG_LO,
				0x00);
			break;
		case SND_SOC_DAIFMT_CBM_CFM:
			/* Set to master mode */
			reg14val |= M98088_DAI_MAS;
			break;
		case SND_SOC_DAIFMT_CBS_CFM:
		case SND_SOC_DAIFMT_CBM_CFS:
		default:
			dev_err(codec->dev, "Clock mode unsupported");
			return -EINVAL;
		}

		switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
		case SND_SOC_DAIFMT_I2S:
			reg14val |= M98088_DAI_DLY;
			break;
		case SND_SOC_DAIFMT_LEFT_J:
			break;
		default:
			return -EINVAL;
		}

		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_NB_IF:
			reg14val |= M98088_DAI_WCI;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			reg14val |= M98088_DAI_BCI;
			break;
		case SND_SOC_DAIFMT_IB_IF:
			reg14val |= M98088_DAI_BCI|M98088_DAI_WCI;
			break;
		default:
			return -EINVAL;
		}

		snd_soc_update_bits(codec, M98088_REG_14_DAI1_FORMAT,
			M98088_DAI_MAS | M98088_DAI_DLY | M98088_DAI_BCI |
			M98088_DAI_WCI, reg14val);

		/* 64x BCLK; enable 64x oversampling when digital mics
		 * are in use
		 */
		reg15val = M98088_DAI_BSEL64;
		if (max98088->digmic)
			reg15val |= M98088_DAI_OSR64;
		snd_soc_write(codec, M98088_REG_15_DAI1_CLOCK, reg15val);
	}

	return 0;
}

/*
 * Configure the DAI2 bus format; mirrors DAI1 on the 0x1A-0x1D
 * register set (no digital-mic oversampling option here).
 */
static int max98088_dai2_set_fmt(struct snd_soc_dai *codec_dai,
				 unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	struct max98088_cdata *cdata;
	u8 reg1Cval = 0;

	cdata = &max98088->dai[1];

	if (fmt != cdata->fmt) {
		cdata->fmt = fmt;

		switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
		case SND_SOC_DAIFMT_CBS_CFS:
			/* Slave mode PLL */
			snd_soc_write(codec, M98088_REG_1A_DAI2_CLKCFG_HI,
				0x80);
			snd_soc_write(codec, M98088_REG_1B_DAI2_CLKCFG_LO,
				0x00);
			break;
		case SND_SOC_DAIFMT_CBM_CFM:
			/* Set to master mode */
			reg1Cval |= M98088_DAI_MAS;
			break;
		case SND_SOC_DAIFMT_CBS_CFM:
		case SND_SOC_DAIFMT_CBM_CFS:
		default:
			dev_err(codec->dev, "Clock mode unsupported");
			return -EINVAL;
		}

		switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
		case SND_SOC_DAIFMT_I2S:
			reg1Cval |= M98088_DAI_DLY;
			break;
		case SND_SOC_DAIFMT_LEFT_J:
			break;
		default:
			return -EINVAL;
		}

		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_NB_IF:
			reg1Cval |= M98088_DAI_WCI;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			reg1Cval |= M98088_DAI_BCI;
			break;
		case SND_SOC_DAIFMT_IB_IF:
			reg1Cval |= M98088_DAI_BCI|M98088_DAI_WCI;
			break;
		default:
			return -EINVAL;
		}

		snd_soc_update_bits(codec, M98088_REG_1C_DAI2_FORMAT,
			M98088_DAI_MAS | M98088_DAI_DLY | M98088_DAI_BCI |
			M98088_DAI_WCI, reg1Cval);

		snd_soc_write(codec, M98088_REG_1D_DAI2_CLOCK,
			M98088_DAI_BSEL64);
	}

	return 0;
}

/* Mute/unmute the DAI1 playback path. */
static int max98088_dai1_digital_mute(struct snd_soc_dai *codec_dai, int mute)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	int reg;

	if (mute)
		reg = M98088_DAI_MUTE;
	else
		reg = 0;

	snd_soc_update_bits(codec, M98088_REG_2F_LVL_DAI1_PLAY,
		M98088_DAI_MUTE_MASK, reg);
	return 0;
}

/* Mute/unmute the DAI2 playback path. */
static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	int reg;

	if (mute)
		reg = M98088_DAI_MUTE;
	else
		reg = 0;

	snd_soc_update_bits(codec, M98088_REG_31_LVL_DAI2_PLAY,
		M98088_DAI_MUTE_MASK, reg);
	return 0;
}

/*
 * Replay dirty cached register values to the hardware after it has
 * been powered back up (cache_sync is set in SND_SOC_BIAS_OFF).
 *
 * NOTE(review): the cache is walked through a u16 pointer while the
 * codec driver declares reg_word_size = sizeof(u8) — confirm the
 * cache element type matches max98088_reg[].
 */
static void max98088_sync_cache(struct snd_soc_codec *codec)
{
	u16 *reg_cache = codec->reg_cache;
	int i;

	if (!codec->cache_sync)
		return;

	codec->cache_only = 0;

	/* write back cached values if they're writeable and
	 * different from the hardware default.
*/ for (i = 1; i < codec->driver->reg_cache_size; i++) { if (!max98088_access[i].writable) continue; if (reg_cache[i] == max98088_reg[i]) continue; snd_soc_write(codec, i, reg_cache[i]); } codec->cache_sync = 0; } static int max98088_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) max98088_sync_cache(codec); snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN, M98088_MBEN, M98088_MBEN); break; case SND_SOC_BIAS_OFF: snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN, M98088_MBEN, 0); codec->cache_sync = 1; break; } codec->dapm.bias_level = level; return 0; } #define MAX98088_RATES SNDRV_PCM_RATE_8000_96000 #define MAX98088_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops max98088_dai1_ops = { .set_sysclk = max98088_dai_set_sysclk, .set_fmt = max98088_dai1_set_fmt, .hw_params = max98088_dai1_hw_params, .digital_mute = max98088_dai1_digital_mute, }; static const struct snd_soc_dai_ops max98088_dai2_ops = { .set_sysclk = max98088_dai_set_sysclk, .set_fmt = max98088_dai2_set_fmt, .hw_params = max98088_dai2_hw_params, .digital_mute = max98088_dai2_digital_mute, }; static struct snd_soc_dai_driver max98088_dai[] = { { .name = "HiFi", .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = MAX98088_RATES, .formats = MAX98088_FORMATS, }, .capture = { .stream_name = "HiFi Capture", .channels_min = 1, .channels_max = 2, .rates = MAX98088_RATES, .formats = MAX98088_FORMATS, }, .ops = &max98088_dai1_ops, }, { .name = "Aux", .playback = { .stream_name = "Aux Playback", .channels_min = 1, .channels_max = 2, .rates = MAX98088_RATES, .formats = MAX98088_FORMATS, }, .ops = &max98088_dai2_ops, } }; static const char *eq_mode_name[] = {"EQ1 Mode", "EQ2 Mode"}; static int max98088_get_channel(struct snd_soc_codec *codec, 
const char *name) { int i; for (i = 0; i < ARRAY_SIZE(eq_mode_name); i++) if (strcmp(name, eq_mode_name[i]) == 0) return i; /* Shouldn't happen */ dev_err(codec->dev, "Bad EQ channel name '%s'\n", name); return -EINVAL; } static void max98088_setup_eq1(struct snd_soc_codec *codec) { struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_pdata *pdata = max98088->pdata; struct max98088_eq_cfg *coef_set; int best, best_val, save, i, sel, fs; struct max98088_cdata *cdata; cdata = &max98088->dai[0]; if (!pdata || !max98088->eq_textcnt) return; /* Find the selected configuration with nearest sample rate */ fs = cdata->rate; sel = cdata->eq_sel; best = 0; best_val = INT_MAX; for (i = 0; i < pdata->eq_cfgcnt; i++) { if (strcmp(pdata->eq_cfg[i].name, max98088->eq_texts[sel]) == 0 && abs(pdata->eq_cfg[i].rate - fs) < best_val) { best = i; best_val = abs(pdata->eq_cfg[i].rate - fs); } } dev_dbg(codec->dev, "Selected %s/%dHz for %dHz sample rate\n", pdata->eq_cfg[best].name, pdata->eq_cfg[best].rate, fs); /* Disable EQ while configuring, and save current on/off state */ save = snd_soc_read(codec, M98088_REG_49_CFG_LEVEL); snd_soc_update_bits(codec, M98088_REG_49_CFG_LEVEL, M98088_EQ1EN, 0); coef_set = &pdata->eq_cfg[sel]; m98088_eq_band(codec, 0, 0, coef_set->band1); m98088_eq_band(codec, 0, 1, coef_set->band2); m98088_eq_band(codec, 0, 2, coef_set->band3); m98088_eq_band(codec, 0, 3, coef_set->band4); m98088_eq_band(codec, 0, 4, coef_set->band5); /* Restore the original on/off state */ snd_soc_update_bits(codec, M98088_REG_49_CFG_LEVEL, M98088_EQ1EN, save); } static void max98088_setup_eq2(struct snd_soc_codec *codec) { struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec); struct max98088_pdata *pdata = max98088->pdata; struct max98088_eq_cfg *coef_set; int best, best_val, save, i, sel, fs; struct max98088_cdata *cdata; cdata = &max98088->dai[1]; if (!pdata || !max98088->eq_textcnt) return; /* Find the selected configuration with 
nearest sample rate */
	fs = cdata->rate;
	sel = cdata->eq_sel;
	best = 0;
	best_val = INT_MAX;
	/* Find the EQ config whose name matches the selected enum text and
	 * whose rate is closest to the current stream rate. */
	for (i = 0; i < pdata->eq_cfgcnt; i++) {
		if (strcmp(pdata->eq_cfg[i].name, max98088->eq_texts[sel]) == 0 &&
		    abs(pdata->eq_cfg[i].rate - fs) < best_val) {
			best = i;
			best_val = abs(pdata->eq_cfg[i].rate - fs);
		}
	}

	dev_dbg(codec->dev, "Selected %s/%dHz for %dHz sample rate\n",
		pdata->eq_cfg[best].name,
		pdata->eq_cfg[best].rate, fs);

	/* Disable EQ while configuring, and save current on/off state */
	save = snd_soc_read(codec, M98088_REG_49_CFG_LEVEL);
	snd_soc_update_bits(codec, M98088_REG_49_CFG_LEVEL, M98088_EQ2EN, 0);

	/* NOTE(review): 'best' (nearest-rate match found above and reported in
	 * the dev_dbg) is not used here; the code programs eq_cfg[sel] instead.
	 * This looks like it should be &pdata->eq_cfg[best] — confirm against
	 * the analogous max98095 driver before changing. */
	coef_set = &pdata->eq_cfg[sel];

	m98088_eq_band(codec, 1, 0, coef_set->band1);
	m98088_eq_band(codec, 1, 1, coef_set->band2);
	m98088_eq_band(codec, 1, 2, coef_set->band3);
	m98088_eq_band(codec, 1, 3, coef_set->band4);
	m98088_eq_band(codec, 1, 4, coef_set->band5);

	/* Restore the original on/off state */
	snd_soc_update_bits(codec, M98088_REG_49_CFG_LEVEL, M98088_EQ2EN, save);
}

/*
 * Select an EQ preset for the DAI channel encoded in the control's name.
 * Returns 0 on success, -EINVAL for an out-of-range selection, or the
 * negative error from max98088_get_channel() for an unknown control name.
 */
static int max98088_put_eq_enum(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	struct max98088_pdata *pdata = max98088->pdata;
	int channel = max98088_get_channel(codec, kcontrol->id.name);
	struct max98088_cdata *cdata;
	int sel = ucontrol->value.integer.value[0];

	if (channel < 0)
		return channel;

	cdata = &max98088->dai[channel];

	if (sel >= pdata->eq_cfgcnt)
		return -EINVAL;

	cdata->eq_sel = sel;

	/* Reprogram the hardware EQ for whichever DAI this control targets */
	switch (channel) {
	case 0:
		max98088_setup_eq1(codec);
		break;
	case 1:
		max98088_setup_eq2(codec);
		break;
	}

	return 0;
}

/* Report the currently selected EQ preset for the control's DAI channel. */
static int max98088_get_eq_enum(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	int channel = max98088_get_channel(codec, kcontrol->id.name);
	struct max98088_cdata *cdata;

	if (channel < 0)
		return channel;

	cdata = &max98088->dai[channel];
	ucontrol->value.enumerated.item[0] = cdata->eq_sel;

	return 0;
}

/*
 * Build the EQ enum texts from the platform-data EQ configs (deduplicated
 * by name) and register one EQ-mode control per DAI.
 */
static void max98088_handle_eq_pdata(struct snd_soc_codec *codec)
{
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	struct max98088_pdata *pdata = max98088->pdata;
	struct max98088_eq_cfg *cfg;
	unsigned int cfgcnt;
	int i, j;
	const char **t;
	int ret;

	struct snd_kcontrol_new controls[] = {
		SOC_ENUM_EXT((char *)eq_mode_name[0],
			     max98088->eq_enum,
			     max98088_get_eq_enum,
			     max98088_put_eq_enum),
		SOC_ENUM_EXT((char *)eq_mode_name[1],
			     max98088->eq_enum,
			     max98088_get_eq_enum,
			     max98088_put_eq_enum),
	};
	BUILD_BUG_ON(ARRAY_SIZE(controls) != ARRAY_SIZE(eq_mode_name));

	cfg = pdata->eq_cfg;
	cfgcnt = pdata->eq_cfgcnt;

	/* Setup an array of texts for the equalizer enum.
	 * This is based on Mark Brown's equalizer driver code.
	 */
	max98088->eq_textcnt = 0;
	max98088->eq_texts = NULL;
	for (i = 0; i < cfgcnt; i++) {
		/* Skip names we've already collected */
		for (j = 0; j < max98088->eq_textcnt; j++) {
			if (strcmp(cfg[i].name, max98088->eq_texts[j]) == 0)
				break;
		}

		if (j != max98088->eq_textcnt)
			continue;

		/* Expand the array; on allocation failure this entry is
		 * silently dropped and the old array stays valid. */
		t = krealloc(max98088->eq_texts,
			     sizeof(char *) * (max98088->eq_textcnt + 1),
			     GFP_KERNEL);
		if (t == NULL)
			continue;

		/* Store the new entry */
		t[max98088->eq_textcnt] = cfg[i].name;
		max98088->eq_textcnt++;
		max98088->eq_texts = t;
	}

	/* Now point the soc_enum to .texts array items */
	max98088->eq_enum.texts = max98088->eq_texts;
	max98088->eq_enum.max = max98088->eq_textcnt;

	ret = snd_soc_add_codec_controls(codec, controls, ARRAY_SIZE(controls));
	if (ret != 0)
		dev_err(codec->dev, "Failed to add EQ control: %d\n", ret);
}

/* Apply platform data: mic mode, receiver output mode, and optional EQ. */
static void max98088_handle_pdata(struct snd_soc_codec *codec)
{
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	struct max98088_pdata *pdata = max98088->pdata;
	u8 regval = 0;

	if (!pdata) {
		dev_dbg(codec->dev, "No platform data\n");
		return;
	}

	/* Configure mic for analog/digital mic mode */
	if (pdata->digmic_left_mode)
		regval |= M98088_DIGMIC_L;

	if (pdata->digmic_right_mode)
		regval |= M98088_DIGMIC_R;

	max98088->digmic = (regval ? 1 : 0);

	snd_soc_write(codec, M98088_REG_48_CFG_MIC, regval);

	/* Configure receiver output */
	regval = ((pdata->receiver_mode) ? M98088_REC_LINEMODE : 0);
	snd_soc_update_bits(codec, M98088_REG_2A_MIC_REC_CNTL,
			    M98088_REC_LINEMODE_MASK, regval);

	/* Configure equalizers */
	if (pdata->eq_cfgcnt)
		max98088_handle_eq_pdata(codec);
}

#ifdef CONFIG_PM
/* Power the codec fully down across suspend. */
static int max98088_suspend(struct snd_soc_codec *codec)
{
	max98088_set_bias_level(codec, SND_SOC_BIAS_OFF);

	return 0;
}

/* Bring the codec back to standby bias on resume. */
static int max98088_resume(struct snd_soc_codec *codec)
{
	max98088_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	return 0;
}
#else
#define max98088_suspend NULL
#define max98088_resume NULL
#endif

/*
 * Codec-level probe: set up register-cache I/O, reset the cached private
 * state, verify the device by reading its revision ID, and program the
 * power-on register defaults before registering the controls.
 */
static int max98088_probe(struct snd_soc_codec *codec)
{
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
	struct max98088_cdata *cdata;
	int ret = 0;

	codec->cache_sync = 1;

	ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		return ret;
	}

	/* initialize private data */

	max98088->sysclk = (unsigned) -1;
	max98088->eq_textcnt = 0;

	cdata = &max98088->dai[0];
	cdata->rate = (unsigned)-1;
	cdata->fmt  = (unsigned)-1;
	cdata->eq_sel = 0;

	cdata = &max98088->dai[1];
	cdata->rate = (unsigned)-1;
	cdata->fmt  = (unsigned)-1;
	cdata->eq_sel = 0;

	max98088->ina_state = 0;
	max98088->inb_state = 0;
	max98088->ex_mode = 0;
	max98088->digmic = 0;
	max98088->mic1pre = 0;
	max98088->mic2pre = 0;

	/* Revision read doubles as a presence check for the chip */
	ret = snd_soc_read(codec, M98088_REG_FF_REV_ID);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to read device revision: %d\n",
			ret);
		goto err_access;
	}
	dev_info(codec->dev, "revision %c\n", ret + 'A');

	snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV);

	/* initialize registers cache to hardware default */
	max98088_set_bias_level(codec, SND_SOC_BIAS_STANDBY);

	snd_soc_write(codec, M98088_REG_0F_IRQ_ENABLE, 0x00);

	snd_soc_write(codec, M98088_REG_22_MIX_DAC,
		      M98088_DAI1L_TO_DACL|M98088_DAI2L_TO_DACL|
		      M98088_DAI1R_TO_DACR|M98088_DAI2R_TO_DACR);

	snd_soc_write(codec, M98088_REG_4E_BIAS_CNTL, 0xF0);
	snd_soc_write(codec, M98088_REG_50_DAC_BIAS2, 0x0F);

	snd_soc_write(codec, M98088_REG_16_DAI1_IOCFG,
		      M98088_S1NORMAL|M98088_SDATA);

	snd_soc_write(codec, M98088_REG_1E_DAI2_IOCFG,
		      M98088_S2NORMAL|M98088_SDATA);

	max98088_handle_pdata(codec);

	snd_soc_add_codec_controls(codec, max98088_snd_controls,
				   ARRAY_SIZE(max98088_snd_controls));

err_access:
	return ret;
}

/* Codec-level remove: power down and free the EQ enum text array. */
static int max98088_remove(struct snd_soc_codec *codec)
{
	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);

	max98088_set_bias_level(codec, SND_SOC_BIAS_OFF);
	kfree(max98088->eq_texts);

	return 0;
}

static struct snd_soc_codec_driver soc_codec_dev_max98088 = {
	.probe   = max98088_probe,
	.remove  = max98088_remove,
	.suspend = max98088_suspend,
	.resume  = max98088_resume,
	.set_bias_level = max98088_set_bias_level,
	.reg_cache_size = ARRAY_SIZE(max98088_reg),
	.reg_word_size = sizeof(u8),
	.reg_cache_default = max98088_reg,
	.volatile_register = max98088_volatile_register,
	.dapm_widgets = max98088_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(max98088_dapm_widgets),
	.dapm_routes = max98088_audio_map,
	.num_dapm_routes = ARRAY_SIZE(max98088_audio_map),
};

/* I2C probe: allocate per-device state (devm-managed) and register codec. */
static int max98088_i2c_probe(struct i2c_client *i2c,
			      const struct i2c_device_id *id)
{
	struct max98088_priv *max98088;
	int ret;

	max98088 = devm_kzalloc(&i2c->dev, sizeof(struct max98088_priv),
				GFP_KERNEL);
	if (max98088 == NULL)
		return -ENOMEM;

	/* driver_data distinguishes MAX98088 from MAX98089 */
	max98088->devtype = id->driver_data;

	i2c_set_clientdata(i2c, max98088);
	max98088->pdata = i2c->dev.platform_data;

	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98088,
				     &max98088_dai[0], 2);
	return ret;
}

static int max98088_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}

static const struct i2c_device_id max98088_i2c_id[] = {
	{ "max98088", MAX98088 },
	{ "max98089", MAX98089 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);

static struct i2c_driver max98088_i2c_driver = {
	.driver = {
		.name = "max98088",
		.owner = THIS_MODULE,
	},
	.probe  = max98088_i2c_probe,
	.remove = max98088_i2c_remove,
	.id_table = max98088_i2c_id,
};

module_i2c_driver(max98088_i2c_driver);

MODULE_DESCRIPTION("ALSA SoC MAX98088 driver");
MODULE_AUTHOR("Peter Hsiang, Jesse Marroquin");
MODULE_LICENSE("GPL");
gpl-2.0
bbbLinux/kernel
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
102
113358
/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		/* done when the selected bit matches the requested polarity */
		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

/* Same as t4_wait_op_done_val() but discards the final register value. */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr);                    /* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* the assert details arrive as a FW_DEBUG_CMD reply in the mailbox */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

/* Dump the full 64-byte contents of a mailbox for diagnostics. */
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 * t4_wr_mbox_meat - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond.  @sleep_ok determines whether we may sleep while awaiting
 * the response.  If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	/* progressive backoff schedule in ms; the last entry repeats */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	/* command must be a multiple of 16 bytes and fit in the mailbox */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				/* ownership returned without a reply; rearm */
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				/* FW reported an assertion, not our reply */
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}

/**
 * t4_mc_read - read from MC through backdoor accesses
 * @adap: the adapter
 * @addr: address of first byte requested
 * @data: 64 bytes of data containing the requested address
* @ecc: where to store the corresponding 64-bit ECC word * * Read 64 bytes of data from MC starting at a 64-byte-aligned address * that covers the requested address @addr. If @parity is not %NULL it * is assigned the 64-bit ECC word for the read data. */ int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) { int i; if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) return -EBUSY; t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); t4_write_reg(adap, MC_BIST_CMD_LEN, 64); t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1)); i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); if (i) return i; #define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) for (i = 15; i >= 0; i--) *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); if (ecc) *ecc = t4_read_reg64(adap, MC_DATA(16)); #undef MC_DATA return 0; } /** * t4_edc_read - read from EDC through backdoor accesses * @adap: the adapter * @idx: which EDC to access * @addr: address of first byte requested * @data: 64 bytes of data containing the requested address * @ecc: where to store the corresponding 64-bit ECC word * * Read 64 bytes of data from EDC starting at a 64-byte-aligned address * that covers the requested address @addr. If @parity is not %NULL it * is assigned the 64-bit ECC word for the read data. 
*/ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) { int i; idx *= EDC_STRIDE; if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) return -EBUSY; t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); t4_write_reg(adap, EDC_BIST_CMD + idx, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); if (i) return i; #define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) for (i = 15; i >= 0; i--) *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); if (ecc) *ecc = t4_read_reg64(adap, EDC_DATA(16)); #undef EDC_DATA return 0; } /* * t4_mem_win_rw - read/write memory through PCIE memory window * @adap: the adapter * @addr: address of first byte requested * @data: MEMWIN0_APERTURE bytes of data containing the requested address * @dir: direction of transfer 1 => read, 0 => write * * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a * MEMWIN0_APERTURE-byte-aligned address that covers the requested * address @addr. */ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) { int i; /* * Setup offset into PCIE memory window. Address must be a * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to * ensure that changes propagate before we attempt to use the new * values.) 
*/ t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~(MEMWIN0_APERTURE - 1)); t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */ for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) { if (dir) *data++ = (__force __be32) t4_read_reg(adap, (MEMWIN0_BASE + i)); else t4_write_reg(adap, (MEMWIN0_BASE + i), (__force u32) *data++); } return 0; } /** * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window * @adap: the adapter * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC * @addr: address within indicated memory type * @len: amount of memory to transfer * @buf: host memory buffer * @dir: direction of transfer 1 => read, 0 => write * * Reads/writes an [almost] arbitrary memory region in the firmware: the * firmware memory address, length and host buffer must be aligned on * 32-bit boudaries. The memory is transferred as a raw byte sequence * from/to the firmware's memory. If this memory contains data * structures which contain multi-byte integers, it's the callers * responsibility to perform appropriate byte order conversions. */ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf, int dir) { u32 pos, start, end, offset, memoffset; int ret = 0; __be32 *data; /* * Argument sanity checks ... */ if ((addr & 0x3) || (len & 0x3)) return -EINVAL; data = vmalloc(MEMWIN0_APERTURE); if (!data) return -ENOMEM; /* * Offset into the region of memory which is being accessed * MEM_EDC0 = 0 * MEM_EDC1 = 1 * MEM_MC = 2 */ memoffset = (mtype * (5 * 1024 * 1024)); /* Determine the PCIE_MEM_ACCESS_OFFSET */ addr = addr + memoffset; /* * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes * at a time so we need to round down the start and round up the end. * We'll start copying out of the first line at (addr - start) a word * at a time. 
*/ start = addr & ~(MEMWIN0_APERTURE-1); end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1); offset = (addr - start)/sizeof(__be32); for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) { /* * If we're writing, copy the data from the caller's memory * buffer */ if (!dir) { /* * If we're doing a partial write, then we need to do * a read-modify-write ... */ if (offset || len < MEMWIN0_APERTURE) { ret = t4_mem_win_rw(adap, pos, data, 1); if (ret) break; } while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && len > 0) { data[offset++] = *buf++; len -= sizeof(__be32); } } /* * Transfer a block of memory and bail if there's an error. */ ret = t4_mem_win_rw(adap, pos, data, dir); if (ret) break; /* * If we're reading, copy the data into the caller's memory * buffer. */ if (dir) while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && len > 0) { *buf++ = data[offset++]; len -= sizeof(__be32); } } vfree(data); return ret; } int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf) { return t4_memory_rw(adap, mtype, addr, len, buf, 0); } #define EEPROM_STAT_ADDR 0x7bfc #define VPD_BASE 0 #define VPD_LEN 512 /** * t4_seeprom_wp - enable/disable EEPROM write protection * @adapter: the adapter * @enable: whether to enable or disable write protection * * Enables or disables write protection on the serial EEPROM. */ int t4_seeprom_wp(struct adapter *adapter, bool enable) { unsigned int v = enable ? 0xc : 0; int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); return ret < 0 ? ret : 0; } /** * get_vpd_params - read VPD parameters from VPD EEPROM * @adapter: adapter to read * @p: where to store the parameters * * Reads card parameters stored in VPD EEPROM. 
*/
int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int i, ret;
	int ec, sn;
	u8 *vpd, csum;
	unsigned int vpdr_len, kw_offset, id_len;

	vpd = vmalloc(VPD_LEN);
	if (!vpd)
		return -ENOMEM;
	ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd);
	if (ret < 0)
		goto out;

	if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
		dev_err(adapter->pdev_dev, "missing VPD ID string\n");
		ret = -EINVAL;
		goto out;
	}

	id_len = pci_vpd_lrdt_size(vpd);
	if (id_len > ID_LEN)
		id_len = ID_LEN;

	i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		dev_err(adapter->pdev_dev, "missing VPD-R section\n");
		ret = -EINVAL;
		goto out;
	}

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
	if (vpdr_len + kw_offset > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		ret = -EINVAL;
		goto out;
	}

/* Locate a VPD-R keyword and advance past its field header, or bail out. */
#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		ret = -EINVAL; \
		goto out; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	/* "RV" holds the checksum byte; all bytes up to it must sum to 0 */
	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		ret = -EINVAL;
		goto out;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, 0, 0,
			      1, &cclk_param, &cclk_val);

out:
	vfree(vpd);
	if (ret)
		return ret;
	p->cclk = cclk_val;
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.
The location of * the write needs to be specified prior to calling this by issuing the * appropriate commands to the serial flash. */ static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, int lock, u32 val) { if (!byte_cnt || byte_cnt > 4) return -EINVAL; if (t4_read_reg(adapter, SF_OP) & SF_BUSY) return -EBUSY; cont = cont ? SF_CONT : 0; lock = lock ? SF_LOCK : 0; t4_write_reg(adapter, SF_DATA, val); t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1) | OP_WR); return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5); } /** * flash_wait_op - wait for a flash operation to complete * @adapter: the adapter * @attempts: max number of polls of the status register * @delay: delay between polls in ms * * Wait for a flash operation to complete by polling the status register. */ static int flash_wait_op(struct adapter *adapter, int attempts, int delay) { int ret; u32 status; while (1) { if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) return ret; if (!(status & 1)) return 0; if (--attempts == 0) return -EAGAIN; if (delay) msleep(delay); } } /** * t4_read_flash - read words from serial flash * @adapter: the adapter * @addr: the start address for the read * @nwords: how many 32-bit words to read * @data: where to store the read data * @byte_oriented: whether to store data as bytes or as words * * Read the specified number of 32-bit words from the serial flash. * If @byte_oriented is set the read data is stored as a byte array * (i.e., big-endian), otherwise as 32-bit words in the platform's * natural endianess. 
*/ static int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented) { int ret; if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) return -EINVAL; addr = swab32(addr) | SF_RD_DATA_FAST; if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) return ret; for ( ; nwords; nwords--, data++) { ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); if (nwords == 1) t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ if (ret) return ret; if (byte_oriented) *data = (__force __u32) (htonl(*data)); } return 0; } /** * t4_write_flash - write up to a page of data to the serial flash * @adapter: the adapter * @addr: the start address to write * @n: length of data to write in bytes * @data: the data to write * * Writes up to a page of data (256 bytes) to the serial flash starting * at the given address. All the data must be written to the same page. */ static int t4_write_flash(struct adapter *adapter, unsigned int addr, unsigned int n, const u8 *data) { int ret; u32 buf[64]; unsigned int i, c, left, val, offset = addr & 0xff; if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) return -EINVAL; val = swab32(addr) | SF_PROG_PAGE; if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) goto unlock; for (left = n; left; left -= c) { c = min(left, 4U); for (val = 0, i = 0; i < c; ++i) val = (val << 8) + *data++; ret = sf1_write(adapter, c, c != left, 1, val); if (ret) goto unlock; } ret = flash_wait_op(adapter, 8, 1); if (ret) goto unlock; t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ /* Read the page to verify the write succeeded */ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); if (ret) return ret; if (memcmp(data - n, (u8 *)buf + offset, n)) { dev_err(adapter->pdev_dev, "failed to correctly write the flash page at %#x\n", addr); return -EIO; } return 0; unlock: 
t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ return ret; } /** * get_fw_version - read the firmware version * @adapter: the adapter * @vers: where to place the version * * Reads the FW version from flash. */ static int get_fw_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); } /** * get_tp_version - read the TP microcode version * @adapter: the adapter * @vers: where to place the version * * Reads the TP microcode version from flash. */ static int get_tp_version(struct adapter *adapter, u32 *vers) { return t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0); } /** * t4_check_fw_version - check if the FW is compatible with this driver * @adapter: the adapter * * Checks if an adapter's FW is compatible with the driver. Returns 0 * if there's exact match, a negative error if the version could not be * read or there's a major version mismatch, and a positive value if the * expected major version is found but there's a minor version mismatch. 
*/ int t4_check_fw_version(struct adapter *adapter) { u32 api_vers[2]; int ret, major, minor, micro; ret = get_fw_version(adapter, &adapter->params.fw_vers); if (!ret) ret = get_tp_version(adapter, &adapter->params.tp_vers); if (!ret) ret = t4_read_flash(adapter, adapter->params.sf_fw_start + offsetof(struct fw_hdr, intfver_nic), 2, api_vers, 1); if (ret) return ret; major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); memcpy(adapter->params.api_vers, api_vers, sizeof(adapter->params.api_vers)); if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ dev_err(adapter->pdev_dev, "card FW has major version %u, driver wants %u\n", major, FW_VERSION_MAJOR); return -EINVAL; } if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) return 0; /* perfect match */ /* Minor/micro version mismatch. Report it but often it's OK. */ return 1; } /** * t4_flash_erase_sectors - erase a range of flash sectors * @adapter: the adapter * @start: the first sector to erase * @end: the last sector to erase * * Erases the sectors in the given inclusive range. */ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) { int ret = 0; while (start <= end) { if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, 1, SF_ERASE_SECTOR | (start << 8))) != 0 || (ret = flash_wait_op(adapter, 14, 500)) != 0) { dev_err(adapter->pdev_dev, "erase of flash sector %d failed, error %d\n", start, ret); break; } start++; } t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ return ret; } /** * t4_flash_cfg_addr - return the address of the flash configuration file * @adapter: the adapter * * Return the address within the flash where the Firmware Configuration * File is stored. 
*/ unsigned int t4_flash_cfg_addr(struct adapter *adapter) { if (adapter->params.sf_size == 0x100000) return FLASH_FPGA_CFG_START; else return FLASH_CFG_START; } /** * t4_load_cfg - download config file * @adap: the adapter * @cfg_data: the cfg text file to write * @size: text file size * * Write the supplied config text file to the card's serial flash. */ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) { int ret, i, n; unsigned int addr; unsigned int flash_cfg_start_sec; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; addr = t4_flash_cfg_addr(adap); flash_cfg_start_sec = addr / SF_SEC_SIZE; if (size > FLASH_CFG_MAX_SIZE) { dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n", FLASH_CFG_MAX_SIZE); return -EFBIG; } i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ sf_sec_size); ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, flash_cfg_start_sec + i - 1); /* * If size == 0 then we're simply erasing the FLASH sectors associated * with the on-adapter Firmware Configuration File. */ if (ret || size == 0) goto out; /* this will write to the flash up to SF_PAGE_SIZE at a time */ for (i = 0; i < size; i += SF_PAGE_SIZE) { if ((size - i) < SF_PAGE_SIZE) n = size - i; else n = SF_PAGE_SIZE; ret = t4_write_flash(adap, addr, n, cfg_data); if (ret) goto out; addr += SF_PAGE_SIZE; cfg_data += SF_PAGE_SIZE; } out: if (ret) dev_err(adap->pdev_dev, "config file %s failed %d\n", (size == 0 ? "clear" : "download"), ret); return ret; } /** * t4_load_fw - download firmware * @adap: the adapter * @fw_data: the firmware image to write * @size: image size * * Write the supplied firmware image to the card's serial flash. 
*/
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	/* Validate the image before touching the flash at all */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	/* the 32-bit words of a valid image sum to 0xffffffff */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec,
	    fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* write the remaining pages (size is a multiple of SF_PAGE_SIZE
	 * because it is a multiple of 512) */
	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* finally stamp the real version into the header */
	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 * t4_link_start - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: the port id
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* No autoneg capability: advertise everything supported. */
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Autoneg off: pin the requested speed. */
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		/* Autoneg on: lc->fc is resolved later from the outcome. */
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/* Per-module interrupt handler callback. */
typedef void (*int_handler_t)(struct adapter *adap);

/* One row of a table-driven interrupt-status decode table. */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is
 *	terminated by an entry specifying mask 0.  Returns the number of fatal
 *	interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	/* Table is terminated by an entry with mask == 0. */
	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n",
				  acts->msg, status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1,
		  0 },
		{ 0 }
	};

	int fat;

	/* Accumulate fatal counts from all three PCIE cause registers. */
	fat = t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	/* SGE parity status spans two 32-bit registers; combine into one. */
	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
		((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	/* Any parity error is fatal even when CAUSE3 reports nothing. */
	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
		  1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
		  1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause bits and flush the write. */
	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* idx selects the memory controller: MEM_EDC0, MEM_EDC1 or MEM_MC. */
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		/* Correctable errors: report the count and reset it. */
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	/* Parity and uncorrectable ECC errors are fatal; correctable is not */
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);
	/* any MA interrupt is treated as fatal */
	t4_fatal_err(adap);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));

	/* Only the two FIFO parity errors are of interest here. */
	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & RXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
			  port);
	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}

#define PF_INTR_MASK (PFSW)
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g.,
 *	errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch to each module whose cause bit is set. */
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the
 *	top-level interrupt concentrator for global interrupts.  Interrupts
 *	are already enabled at each module, here we just enable the roots of
 *	the interrupt hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.
 *	Only one PCI function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     DBFIFO_HP_INT | DBFIFO_LP_INT |
		     EGRESS_SIZE_ERR);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}

/**
 *	t4_intr_disable - disable interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrators.  The caller must be a PCI function managing global
 *	interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.  Returns a 6-bit hash value.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}

/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the response queue lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used
 *	repeatedly until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed
 *	for @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		/* Pack three queue ids per 32-bit word, cycling through
		 * @rspq as often as needed.
		 */
		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags
 *
 *	Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_pkd =
			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
	} else
		return -EINVAL;	/* only these two modes are supported */
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

	/* Index helpers over the block of MIB registers read into val[]. */
#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs  = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs  = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}

/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		t4_write_reg(adap, TP_MTU_TABLE,
			     MTUINDEX(0xff) | MTUVALUE(i));
		v = t4_read_reg(adap, TP_MTU_TABLE);
		mtus[i] = MTUVALUE_GET(v);
		if (mtu_log)
			mtu_log[i] = MTUWIDTH_GET(v);
	}
}

/**
 *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@adap: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	/* read-modify-write through the TP PIO window */
	t4_write_reg(adap, TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, TP_PIO_DATA, val);
}

/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Initialize the congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	/* Fixed additive-increase (alpha) schedule, 32 windows. */
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	/* Matching multiplicative-decrease (beta) schedule. */
	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448,
		640, 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336,
		20480, 28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = fls(mtu);

		/* round the width down if the MTU is below 3/4 of 2^log2 */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* never let the increment fall below CC_MIN_INCR */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

/**
 *	get_mps_bg_map - return the buffer groups associated with a port
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given port.  Bit i is set if buffer group i is used by the
 *	port.
 */
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));

	/* n encodes the port configuration: 0 = 1 port (all 4 groups),
	 * 1 = 2 ports (2 groups each), otherwise one group per port.
	 */
	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = get_mps_bg_map(adap, idx);

/* Per-port 64-bit stat read; _L names the low half of the register pair. */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
/* Common (non-per-port) 64-bit stat read. */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* Overflow/truncation counters are per buffer group; only read the
	 * groups this port actually owns (per get_mps_bg_map), report 0 for
	 * the rest.
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 *	t4_wol_magic_enable - enable/disable magic packet WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@addr: MAC address expected in magic packets, %NULL to disable
 *
 *	Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
			 const u8 *addr)
{
	if (addr) {
		/* Program the expected MAC: bytes 2-5 in the LO register,
		 * bytes 0-1 in the HI register.
		 */
		t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
			     (addr[2] << 24) | (addr[3] << 16) |
			     (addr[4] << 8) | addr[5]);
		t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
			     (addr[0] << 8) | addr[1]);
	}
	/* Set or clear the MAGICEN bit depending on whether an address was
	 * supplied.
	 */
	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
			 addr ? MAGICEN : 0);
}

/**
 *	t4_wol_pat_enable - enable/disable pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc.  If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;

	if (!enable) {
		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
				 PATEN, 0);
		return 0;
	}
	/* Only NWOL_PAT (8) pattern filters exist. */
	if (map > 0xff)
		return -EINVAL;

#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)

	/* DATA1-3 hold the upper mask words; they are common to every
	 * pattern written in the loop below.
	 */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;

		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
			return -ETIMEDOUT;

		/* write CRC; CRC slots start at EPIO address 32 */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG

	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
	return 0;
}

/*	t4_mk_filtdelwr - create a delete filter WR
 *	@ftid: the filter ID
 *	@wr: the filter work request to populate
 *	@qid: ingress queue to receive the delete notification
 *
 *	Creates a filter work request to delete the supplied filter.
 *	If @qid is
 *	negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY is set when the caller asked (qid < 0) for no delete
	 * notification.
	 */
	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
			      V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/* Initialize the opcode/request/read-write word and length of a simple
 * firmware command structure.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
	(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)

/* Write @val to firmware address-space register @addr via an LDST command
 * issued on mailbox @mbox.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE |
			    FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.addrval.addr = htonl(addr);
	c.u.addrval.val = htonl(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_mem_win_read_len - read memory through PCIE memory window
 *	@adap: the adapter
 *	@addr: address of first byte requested aligned on 32b.
 *	@data: len bytes to hold the data read
 *	@len: amount of data to read from window.  Must be <=
 *	      MEMWIN0_APERTURE after adjusting for 16B alignment
 *	      requirements of the memory window.
 *
 *	Read len bytes of data from MC starting at @addr.
 */
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
	int i;
	int off;

	/*
	 * Align on a 16B boundary.
	 */
	off = addr & 15;
	/* Reject addresses not 32-bit aligned or reads that would run past
	 * the window aperture.
	 */
	if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
		return -EINVAL;

	/* Point the window at the 16B-aligned base, then read it back to
	 * make sure the write has taken effect before reading through it.
	 */
	t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
	t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);

	for (i = 0; i < len; i += 4)
		*data++ = (__force __be32) t4_read_reg(adap,
						(MEMWIN0_BASE + off + i));

	return 0;
}

/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp)
{
	int ret;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
				   FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = htons(reg);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = ntohs(c.u.mdio.rval);
	return ret;
}

/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@valp: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
*/ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, u16 val) { struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) | FW_LDST_CMD_MMD(mmd)); c.u.mdio.raddr = htons(reg); c.u.mdio.rval = htons(val); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_fw_hello - establish communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @evt_mbox: mailbox to receive async FW events * @master: specifies the caller's willingness to be the device master * @state: returns the current device state (if non-NULL) * * Issues a command to establish communication with FW. Returns either * an error (negative integer) or the mailbox of the Master PF. */ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state) { int ret; struct fw_hello_cmd c; u32 v; unsigned int master_mbox; int retries = FW_CMD_HELLO_RETRIES; retry: memset(&c, 0, sizeof(c)); INIT_CMD(c, HELLO, WRITE); c.err_to_clearinit = htonl( FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : FW_HELLO_CMD_MBMASTER_MASK) | FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | FW_HELLO_CMD_CLEARINIT); /* * Issue the HELLO command to the firmware. If it's not successful * but indicates that we got a "busy" or "timeout" condition, retry * the HELLO until we exhaust our retry limit. 
*/ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret < 0) { if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) goto retry; return ret; } v = ntohl(c.err_to_clearinit); master_mbox = FW_HELLO_CMD_MBMASTER_GET(v); if (state) { if (v & FW_HELLO_CMD_ERR) *state = DEV_STATE_ERR; else if (v & FW_HELLO_CMD_INIT) *state = DEV_STATE_INIT; else *state = DEV_STATE_UNINIT; } /* * If we're not the Master PF then we need to wait around for the * Master PF Driver to finish setting up the adapter. * * Note that we also do this wait if we're a non-Master-capable PF and * there is no current Master PF; a Master PF may show up momentarily * and we wouldn't want to fail pointlessly. (This can happen when an * OS loads lots of different drivers rapidly at the same time). In * this case, the Master PF returned by the firmware will be * FW_PCIE_FW_MASTER_MASK so the test below will work ... */ if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 && master_mbox != mbox) { int waiting = FW_CMD_HELLO_TIMEOUT; /* * Wait for the firmware to either indicate an error or * initialized state. If we see either of these we bail out * and report the issue to the caller. If we exhaust the * "hello timeout" and we haven't exhausted our retries, try * again. Otherwise bail with a timeout error. */ for (;;) { u32 pcie_fw; msleep(50); waiting -= 50; /* * If neither Error nor Initialialized are indicated * by the firmware keep waiting till we exaust our * timeout ... and then retry if we haven't exhausted * our retries ... */ pcie_fw = t4_read_reg(adap, MA_PCIE_FW); if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) { if (waiting <= 0) { if (retries-- > 0) goto retry; return -ETIMEDOUT; } continue; } /* * We either have an Error or Initialized condition * report errors preferentially. 
*/ if (state) { if (pcie_fw & FW_PCIE_FW_ERR) *state = DEV_STATE_ERR; else if (pcie_fw & FW_PCIE_FW_INIT) *state = DEV_STATE_INIT; } /* * If we arrived before a Master PF was selected and * there's not a valid Master PF, grab its identity * for our caller. */ if (master_mbox == FW_PCIE_FW_MASTER_MASK && (pcie_fw & FW_PCIE_FW_MASTER_VLD)) master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw); break; } } return master_mbox; } /** * t4_fw_bye - end communication with FW * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a command to terminate communication with FW. */ int t4_fw_bye(struct adapter *adap, unsigned int mbox) { struct fw_bye_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, BYE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_init_cmd - ask FW to initialize the device * @adap: the adapter * @mbox: mailbox to use for the FW command * * Issues a command to FW to partially initialize the device. This * performs initialization that generally doesn't depend on user input. */ int t4_early_init(struct adapter *adap, unsigned int mbox) { struct fw_initialize_cmd c; memset(&c, 0, sizeof(c)); INIT_CMD(c, INITIALIZE, WRITE); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_fw_reset - issue a reset to FW * @adap: the adapter * @mbox: mailbox to use for the FW command * @reset: specifies the type of reset to perform * * Issues a reset command of the specified type to FW. 
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = htonl(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	FW_PCIE_FW_MASTER_MASK).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= FW_PCIE_FW_MASTER_MASK) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = htonl(PIORST | PIORSTMODE);
		c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
				 FW_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if a RESET is desired)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	 1. If we're dealing with newer firmware we'll simply want to take
 *	    the chip's microprocessor out of RESET.  This will cause the
 *	    firmware to start up from its start vector.  And then we'll loop
 *	    until the firmware indicates it's started again (PCIE_FW.HALT
 *	    reset to 0) or we timeout.
 *
 *	 2. If we're dealing with older firmware then we'll need to RESET
 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
 *	    flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= FW_PCIE_FW_MASTER_MASK) {
			t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					PIORST | PIORSTMODE) == 0)
				return 0;
		}

		/* Hard chip reset as a last resort. */
		t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for the firmware to
		 * clear the HALT flag.
		 */
		t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)
		return ret;

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return t4_fw_restart(adap, mbox, reset);
}

/**
 *	t4_fw_config_file - setup an adapter via a Configuration File
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@mtype: the memory type where the Configuration File is located
 *	@maddr: the memory address where the Configuration File is located
 *	@finiver: return value for CF [fini] version
 *	@finicsum: return value for CF [fini] checksum
 *	@cfcsum: return value for CF computed checksum
 *
 *	Issue a command to get the firmware to process the Configuration
 *	File located at the specified mtype/maddress.  If the Configuration
 *	File is processed successfully and return value pointers are
 *	provided, the Configuration File "[fini]" section version and
 *	checksum values will be returned along with the computed checksum.
 *	It's up to the caller to decide how it wants to respond to the
 *	checksums not matching but it is recommended that a prominent warning
 *	be emitted in order to help people rapidly identify changed or
 *	corrupted Configuration Files.
 *
 *	Also note that it's possible to modify things like "niccaps",
 *	"toecaps", etc. between processing the Configuration File and telling
 *	the firmware to use the new configuration.  Callers which want to
 *	do this will need to "hand-roll" their own CAPS_CONFIGS commands for
 *	Configuration Files if they want to do this.
 */
int t4_fw_config_file(struct adapter *adap, unsigned int mbox,
		      unsigned int mtype, unsigned int maddr,
		      u32 *finiver, u32 *finicsum, u32 *cfcsum)
{
	struct fw_caps_config_cmd caps_cmd;
	int ret;

	/*
	 * Tell the firmware to process the indicated Configuration File.
	 * If there are no errors and the caller has provided return value
	 * pointers for the [fini] section version, checksum and computed
	 * checksum, pass those back to the caller.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd);
	if (ret < 0)
		return ret;

	if (finiver)
		*finiver = ntohl(caps_cmd.finiver);
	if (finicsum)
		*finicsum = ntohl(caps_cmd.finicsum);
	if (cfcsum)
		*cfcsum = ntohl(caps_cmd.cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL);
}

/**
 *	t4_fixup_host_params - fix up host-dependent parameters
 *	@adap: the adapter
 *	@page_size: the host's Base Page Size
 *	@cache_line_size: the host's Cache Line Size
 *
 *	Various registers in T4 contain values which are dependent on the
 *	host's Base Page and Cache Line Sizes.  This function will fix all of
 *	those registers with the appropriate values as passed in ...
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same host page size for all eight PFs. */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
		     HOSTPAGESIZEPF0(sge_hps) |
		     HOSTPAGESIZEPF1(sge_hps) |
		     HOSTPAGESIZEPF2(sge_hps) |
		     HOSTPAGESIZEPF3(sge_hps) |
		     HOSTPAGESIZEPF4(sge_hps) |
		     HOSTPAGESIZEPF5(sge_hps) |
		     HOSTPAGESIZEPF6(sge_hps) |
		     HOSTPAGESIZEPF7(sge_hps));
	t4_set_reg_field(adap, SGE_CONTROL,
			 INGPADBOUNDARY_MASK |
			 EGRSTATUSPAGESIZE_MASK,
			 INGPADBOUNDARY(fl_align_log - 5) |
			 EGRSTATUSPAGESIZE(stat_len != 64));

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1) &
		     ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1) &
		     ~(fl_align-1));

	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));

	return 0;
}

/**
 *	t4_fw_initialize - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_query_params - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	int i, ret;
	struct fw_params_cmd c;
	/* Walk the command's {mnem, val} pairs as a flat array of __be32;
	 * stepping p by 2 moves to the next pair's mnem.
	 */
	__be32 *p = &c.param[0].mnem;

	/* The command only has room for 7 parameter slots. */
	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	for (i = 0; i < nparams; i++, p += 2)
		*p = htonl(*params++);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		/* On success, pull each returned value out of the reply. */
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = ntohl(*p);
	return ret;
}

/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
*/ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val) { struct fw_params_cmd c; __be32 *p = &c.param[0].mnem; if (nparams > 7) return -EINVAL; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | FW_PARAMS_CMD_VFN(vf)); c.retval_len16 = htonl(FW_LEN16(c)); while (nparams--) { *p++ = htonl(*params++); *p++ = htonl(*val++); } return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_cfg_pfvf - configure PF/VF resource limits * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF being configured * @vf: the VF being configured * @txq: the max number of egress queues * @txq_eth_ctrl: the max number of egress Ethernet or control queues * @rxqi: the max number of interrupt-capable ingress queues * @rxq: the max number of interruptless ingress queues * @tc: the PCI traffic class * @vi: the max number of virtual interfaces * @cmask: the channel access rights mask for the PF/VF * @pmask: the port access rights mask for the PF/VF * @nexact: the maximum number of exact MPS filters * @rcaps: read capabilities * @wxcaps: write/execute capabilities * * Configures resource limits and capabilities for a physical or virtual * function. 
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	/* Pack the resource limits into the command's bit fields; the
	 * FW_PFVF_CMD_* macros place each value at its field offset.
	 */
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_alloc_vi - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	/* The firmware field holds the count of *extra* MACs beyond the
	 * first one.
	 */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		/* First address always comes back in c.mac; the extras come
		 * back in nmac0..nmac3.  The switch falls through on purpose
		 * so requesting N addresses copies all N of them.
		 */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}

/**
 *	t4_set_rxmode - set Rx properties of a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@mtu: the new MTU or -1
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values: each -1 ("no change") becomes the field's
	 * all-ones mask, which the firmware interprets as "leave as-is".
	 */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}

/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;

	/* The command carries at most 7 exact-match entries. */
	if (naddr > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	/* Two exact entries fit per 16-byte unit, hence (naddr + 2) / 2. */
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		/* FW_VI_MAC_ADD_MAC asks the firmware to pick a free slot. */
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}

	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;

	/* ret is 0 here; reuse it to count successfully placed filters. */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));

		if (idx)
			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
		if (index < NEXACT_MAC)
			ret++;
		else if (hash)
			/* No exact slot: fall back to the inexact hash
			 * filter bitmap.
			 */
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}

/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@add_smt: if true also add the address to the HW SMT
 *
 *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.  @idx can be -1 if the address is a new addition.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.
*/ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int idx, const u8 *addr, bool persist, bool add_smt) { int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; if (idx < 0) /* new allocation */ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | FW_VI_MAC_CMD_SMAC_RESULT(mode) | FW_VI_MAC_CMD_IDX(idx)); memcpy(p->macaddr, addr, sizeof(p->macaddr)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret == 0) { ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); if (ret >= NEXACT_MAC) ret = -ENOMEM; } return ret; } /** * t4_set_addr_hash - program the MAC inexact-match hash filter * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @ucast: whether the hash filter should also match unicast addresses * @vec: the value to be written to the hash filter * @sleep_ok: call is allowed to sleep * * Sets the 64-bit inexact-match hash filter for a virtual interface. 
*/ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, bool ucast, u64 vec, bool sleep_ok) { struct fw_vi_mac_cmd c; memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | FW_VI_MAC_CMD_HASHUNIEN(ucast) | FW_CMD_LEN16(1)); c.u.hash.hashvec = cpu_to_be64(vec); return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); } /** * t4_enable_vi - enable/disable a virtual interface * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @rx_en: 1=enable Rx, 0=disable Rx * @tx_en: 1=enable Tx, 0=disable Tx * * Enables/disables a virtual interface. */ int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en) { struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_identify_port - identify a VI's port by blinking its LED * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id * @nblinks: how many times to blink LED at 2.5 Hz * * Identifies a VI's port by blinking its LED. 
*/ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int nblinks) { struct fw_vi_enable_cmd c; memset(&c, 0, sizeof(c)); c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); c.blinkdur = htons(nblinks); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_iq_free - free an ingress queue and its FLs * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queues * @vf: the VF owning the queues * @iqtype: the ingress queue type * @iqid: ingress queue id * @fl0id: FL0 queue id or 0xffff if no attached FL0 * @fl1id: FL1 queue id or 0xffff if no attached FL1 * * Frees an ingress queue and its associated FLs, if any. */ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id) { struct fw_iq_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | FW_IQ_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c)); c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype)); c.iqid = htons(iqid); c.fl0id = htons(fl0id); c.fl1id = htons(fl1id); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_eth_eq_free - free an Ethernet egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees an Ethernet egress queue. 
*/ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_eth_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) | FW_EQ_ETH_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_ctrl_eq_free - free a control egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees a control egress queue. */ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_ctrl_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) | FW_EQ_CTRL_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c)); c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_ofld_eq_free - free an offload egress queue * @adap: the adapter * @mbox: mailbox to use for the FW command * @pf: the PF owning the queue * @vf: the VF owning the queue * @eqid: egress queue id * * Frees a control egress queue. 
*/ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid) { struct fw_eq_ofld_cmd c; memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) | FW_EQ_OFLD_CMD_VFN(vf)); c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c)); c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid)); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } /** * t4_handle_fw_rpl - process a FW reply message * @adap: the adapter * @rpl: start of the FW message * * Processes a FW message, such as link state change messages. */ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) { u8 opcode = *(const u8 *)rpl; if (opcode == FW_PORT_CMD) { /* link/module state change message */ int speed = 0, fc = 0; const struct fw_port_cmd *p = (void *)rpl; int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); int port = adap->chan_map[chan]; struct port_info *pi = adap2pinfo(adap, port); struct link_config *lc = &pi->link_cfg; u32 stat = ntohl(p->u.info.lstatus_to_modtype); int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); if (stat & FW_PORT_CMD_RXPAUSE) fc |= PAUSE_RX; if (stat & FW_PORT_CMD_TXPAUSE) fc |= PAUSE_TX; if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) speed = SPEED_100; else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) speed = SPEED_1000; else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) speed = SPEED_10000; if (link_ok != lc->link_ok || speed != lc->speed || fc != lc->fc) { /* something changed */ lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; t4_os_link_changed(adap, port, link_ok); } if (mod != pi->mod_type) { pi->mod_type = mod; t4_os_portmod_changed(adap, port); } } return 0; } static void get_pci_mode(struct adapter *adapter, struct pci_params *p) { u16 val; if (pci_is_pcie(adapter->pdev)) { pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val); p->speed = val & 
PCI_EXP_LNKSTA_CLS; p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; } } /** * init_link_config - initialize a link's SW state * @lc: structure holding the link state * @caps: link capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. */ static void init_link_config(struct link_config *lc, unsigned int caps) { lc->supported = caps; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; if (lc->supported & FW_PORT_CAP_ANEG) { lc->advertising = lc->supported & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { lc->advertising = 0; lc->autoneg = AUTONEG_DISABLE; } } int t4_wait_dev_ready(struct adapter *adap) { if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) return 0; msleep(500); return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; } static int get_flash_params(struct adapter *adap) { int ret; u32 info; ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); if (!ret) ret = sf1_read(adap, 3, 0, 1, &info); t4_write_reg(adap, SF_OP, 0); /* unlock SF */ if (ret) return ret; if ((info & 0xff) != 0x20) /* not a Numonix flash */ return -EINVAL; info >>= 16; /* log2 of size */ if (info >= 0x14 && info < 0x18) adap->params.sf_nsec = 1 << (info - 16); else if (info == 0x18) adap->params.sf_nsec = 64; else return -EINVAL; adap->params.sf_size = 1 << info; adap->params.sf_fw_start = t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; return 0; } /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter * @reset: if true perform a HW reset * * Initialize adapter SW state for the various HW modules, set initial * values for some adapter tunables, take PHYs out of reset, and * initialize the MDIO interface. 
*/ int t4_prep_adapter(struct adapter *adapter) { int ret; ret = t4_wait_dev_ready(adapter); if (ret < 0) return ret; get_pci_mode(adapter, &adapter->params.pci); adapter->params.rev = t4_read_reg(adapter, PL_REV); ret = get_flash_params(adapter); if (ret < 0) { dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret); return ret; } init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); /* * Default port for debugging in case we can't reach FW. */ adapter->params.nports = 1; adapter->params.portvec = 1; adapter->params.vpd.cclk = 50000; return 0; } int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) { u8 addr[6]; int ret, i, j = 0; struct fw_port_cmd c; struct fw_rss_vi_config_cmd rvc; memset(&c, 0, sizeof(c)); memset(&rvc, 0, sizeof(rvc)); for_each_port(adap, i) { unsigned int rss_size; struct port_info *p = adap2pinfo(adap, i); while ((adap->params.portvec & (1 << j)) == 0) j++; c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | FW_CMD_READ | FW_PORT_CMD_PORTID(j)); c.action_to_len16 = htonl( FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | FW_LEN16(c)); ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); if (ret) return ret; ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); if (ret < 0) return ret; p->viid = ret; p->tx_chan = j; p->lport = j; p->rss_size = rss_size; memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); adap->port[i]->dev_id = j; ret = ntohl(c.u.info.lstatus_to_modtype); p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? 
FW_PORT_CMD_MDIOADDR_GET(ret) : -1; p->port_type = FW_PORT_CMD_PTYPE_GET(ret); p->mod_type = FW_PORT_MOD_TYPE_NA; rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | FW_CMD_REQUEST | FW_CMD_READ | FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); rvc.retval_len16 = htonl(FW_LEN16(rvc)); ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); if (ret) return ret; p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); j++; } return 0; }
gpl-2.0
NuxiNL/linux
sound/core/seq/seq_ports.c
358
19045
/* * ALSA sequencer Ports * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl> * Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <sound/core.h> #include <linux/slab.h> #include <linux/module.h> #include "seq_system.h" #include "seq_ports.h" #include "seq_clientmgr.h" /* registration of client ports */ /* NOTE: the current implementation of the port structure as a linked list is not optimal for clients that have many ports. For sending messages to all subscribers of a port we first need to find the address of the port structure, which means we have to traverse the list. A direct access table (array) would be better, but big preallocated arrays waste memory. Possible actions: 1) leave it this way, a client does normaly does not have more than a few ports 2) replace the linked list of ports by a array of pointers which is dynamicly kmalloced. When a port is added or deleted we can simply allocate a new array, copy the corresponding pointers, and delete the old one. We then only need a pointer to this array, and an integer that tells us how much elements are in array. 
*/ /* return pointer to port structure - port is locked if found */ struct snd_seq_client_port *snd_seq_port_use_ptr(struct snd_seq_client *client, int num) { struct snd_seq_client_port *port; if (client == NULL) return NULL; read_lock(&client->ports_lock); list_for_each_entry(port, &client->ports_list_head, list) { if (port->addr.port == num) { if (port->closing) break; /* deleting now */ snd_use_lock_use(&port->use_lock); read_unlock(&client->ports_lock); return port; } } read_unlock(&client->ports_lock); return NULL; /* not found */ } /* search for the next port - port is locked if found */ struct snd_seq_client_port *snd_seq_port_query_nearest(struct snd_seq_client *client, struct snd_seq_port_info *pinfo) { int num; struct snd_seq_client_port *port, *found; num = pinfo->addr.port; found = NULL; read_lock(&client->ports_lock); list_for_each_entry(port, &client->ports_list_head, list) { if (port->addr.port < num) continue; if (port->addr.port == num) { found = port; break; } if (found == NULL || port->addr.port < found->addr.port) found = port; } if (found) { if (found->closing) found = NULL; else snd_use_lock_use(&found->use_lock); } read_unlock(&client->ports_lock); return found; } /* initialize snd_seq_port_subs_info */ static void port_subs_info_init(struct snd_seq_port_subs_info *grp) { INIT_LIST_HEAD(&grp->list_head); grp->count = 0; grp->exclusive = 0; rwlock_init(&grp->list_lock); init_rwsem(&grp->list_mutex); grp->open = NULL; grp->close = NULL; } /* create a port, port number is returned (-1 on failure) */ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, int port) { unsigned long flags; struct snd_seq_client_port *new_port, *p; int num = -1; /* sanity check */ if (snd_BUG_ON(!client)) return NULL; if (client->num_ports >= SNDRV_SEQ_MAX_PORTS) { pr_warn("ALSA: seq: too many ports for client %d\n", client->number); return NULL; } /* create a new port */ new_port = kzalloc(sizeof(*new_port), GFP_KERNEL); if (!new_port) return 
NULL; /* failure, out of memory */ /* init port data */ new_port->addr.client = client->number; new_port->addr.port = -1; new_port->owner = THIS_MODULE; sprintf(new_port->name, "port-%d", num); snd_use_lock_init(&new_port->use_lock); port_subs_info_init(&new_port->c_src); port_subs_info_init(&new_port->c_dest); num = port >= 0 ? port : 0; mutex_lock(&client->ports_mutex); write_lock_irqsave(&client->ports_lock, flags); list_for_each_entry(p, &client->ports_list_head, list) { if (p->addr.port > num) break; if (port < 0) /* auto-probe mode */ num = p->addr.port + 1; } /* insert the new port */ list_add_tail(&new_port->list, &p->list); client->num_ports++; new_port->addr.port = num; /* store the port number in the port */ write_unlock_irqrestore(&client->ports_lock, flags); mutex_unlock(&client->ports_mutex); sprintf(new_port->name, "port-%d", num); return new_port; } /* */ static int subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack); static int unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack); static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr, struct snd_seq_client **cp) { struct snd_seq_client_port *p; *cp = snd_seq_client_use_ptr(addr->client); if (*cp) { p = snd_seq_port_use_ptr(*cp, addr->port); if (! 
p) { snd_seq_client_unlock(*cp); *cp = NULL; } return p; } return NULL; } static void delete_and_unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_subscribers *subs, bool is_src, bool ack); static inline struct snd_seq_subscribers * get_subscriber(struct list_head *p, bool is_src) { if (is_src) return list_entry(p, struct snd_seq_subscribers, src_list); else return list_entry(p, struct snd_seq_subscribers, dest_list); } /* * remove all subscribers on the list * this is called from port_delete, for each src and dest list. */ static void clear_subscriber_list(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, int is_src) { struct list_head *p, *n; list_for_each_safe(p, n, &grp->list_head) { struct snd_seq_subscribers *subs; struct snd_seq_client *c; struct snd_seq_client_port *aport; subs = get_subscriber(p, is_src); if (is_src) aport = get_client_port(&subs->info.dest, &c); else aport = get_client_port(&subs->info.sender, &c); delete_and_unsubscribe_port(client, port, subs, is_src, false); if (!aport) { /* looks like the connected port is being deleted. 
* we decrease the counter, and when both ports are deleted * remove the subscriber info */ if (atomic_dec_and_test(&subs->ref_count)) kfree(subs); continue; } /* ok we got the connected port */ delete_and_unsubscribe_port(c, aport, subs, !is_src, true); kfree(subs); snd_seq_port_unlock(aport); snd_seq_client_unlock(c); } } /* delete port data */ static int port_delete(struct snd_seq_client *client, struct snd_seq_client_port *port) { /* set closing flag and wait for all port access are gone */ port->closing = 1; snd_use_lock_sync(&port->use_lock); /* clear subscribers info */ clear_subscriber_list(client, port, &port->c_src, true); clear_subscriber_list(client, port, &port->c_dest, false); if (port->private_free) port->private_free(port->private_data); snd_BUG_ON(port->c_src.count != 0); snd_BUG_ON(port->c_dest.count != 0); kfree(port); return 0; } /* delete a port with the given port id */ int snd_seq_delete_port(struct snd_seq_client *client, int port) { unsigned long flags; struct snd_seq_client_port *found = NULL, *p; mutex_lock(&client->ports_mutex); write_lock_irqsave(&client->ports_lock, flags); list_for_each_entry(p, &client->ports_list_head, list) { if (p->addr.port == port) { /* ok found. delete from the list at first */ list_del(&p->list); client->num_ports--; found = p; break; } } write_unlock_irqrestore(&client->ports_lock, flags); mutex_unlock(&client->ports_mutex); if (found) return port_delete(client, found); else return -ENOENT; } /* delete the all ports belonging to the given client */ int snd_seq_delete_all_ports(struct snd_seq_client *client) { unsigned long flags; struct list_head deleted_list; struct snd_seq_client_port *port, *tmp; /* move the port list to deleted_list, and * clear the port list in the client data. */ mutex_lock(&client->ports_mutex); write_lock_irqsave(&client->ports_lock, flags); if (! 
list_empty(&client->ports_list_head)) { list_add(&deleted_list, &client->ports_list_head); list_del_init(&client->ports_list_head); } else { INIT_LIST_HEAD(&deleted_list); } client->num_ports = 0; write_unlock_irqrestore(&client->ports_lock, flags); /* remove each port in deleted_list */ list_for_each_entry_safe(port, tmp, &deleted_list, list) { list_del(&port->list); snd_seq_system_client_ev_port_exit(port->addr.client, port->addr.port); port_delete(client, port); } mutex_unlock(&client->ports_mutex); return 0; } /* set port info fields */ int snd_seq_set_port_info(struct snd_seq_client_port * port, struct snd_seq_port_info * info) { if (snd_BUG_ON(!port || !info)) return -EINVAL; /* set port name */ if (info->name[0]) strlcpy(port->name, info->name, sizeof(port->name)); /* set capabilities */ port->capability = info->capability; /* get port type */ port->type = info->type; /* information about supported channels/voices */ port->midi_channels = info->midi_channels; port->midi_voices = info->midi_voices; port->synth_voices = info->synth_voices; /* timestamping */ port->timestamping = (info->flags & SNDRV_SEQ_PORT_FLG_TIMESTAMP) ? 1 : 0; port->time_real = (info->flags & SNDRV_SEQ_PORT_FLG_TIME_REAL) ? 
1 : 0; port->time_queue = info->time_queue; return 0; } /* get port info fields */ int snd_seq_get_port_info(struct snd_seq_client_port * port, struct snd_seq_port_info * info) { if (snd_BUG_ON(!port || !info)) return -EINVAL; /* get port name */ strlcpy(info->name, port->name, sizeof(info->name)); /* get capabilities */ info->capability = port->capability; /* get port type */ info->type = port->type; /* information about supported channels/voices */ info->midi_channels = port->midi_channels; info->midi_voices = port->midi_voices; info->synth_voices = port->synth_voices; /* get subscriber counts */ info->read_use = port->c_src.count; info->write_use = port->c_dest.count; /* timestamping */ info->flags = 0; if (port->timestamping) { info->flags |= SNDRV_SEQ_PORT_FLG_TIMESTAMP; if (port->time_real) info->flags |= SNDRV_SEQ_PORT_FLG_TIME_REAL; info->time_queue = port->time_queue; } return 0; } /* * call callback functions (if any): * the callbacks are invoked only when the first (for connection) or * the last subscription (for disconnection) is done. Second or later * subscription results in increment of counter, but no callback is * invoked. * This feature is useful if these callbacks are associated with * initialization or termination of devices (see seq_midi.c). 
*/ static int subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack) { int err = 0; if (!try_module_get(port->owner)) return -EFAULT; grp->count++; if (grp->open && grp->count == 1) { err = grp->open(port->private_data, info); if (err < 0) { module_put(port->owner); grp->count--; } } if (err >= 0 && send_ack && client->type == USER_CLIENT) snd_seq_client_notify_subscription(port->addr.client, port->addr.port, info, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED); return err; } static int unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, struct snd_seq_port_subscribe *info, int send_ack) { int err = 0; if (! grp->count) return -EINVAL; grp->count--; if (grp->close && grp->count == 0) err = grp->close(port->private_data, info); if (send_ack && client->type == USER_CLIENT) snd_seq_client_notify_subscription(port->addr.client, port->addr.port, info, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED); module_put(port->owner); return err; } /* check if both addresses are identical */ static inline int addr_match(struct snd_seq_addr *r, struct snd_seq_addr *s) { return (r->client == s->client) && (r->port == s->port); } /* check the two subscribe info match */ /* if flags is zero, checks only sender and destination addresses */ static int match_subs_info(struct snd_seq_port_subscribe *r, struct snd_seq_port_subscribe *s) { if (addr_match(&r->sender, &s->sender) && addr_match(&r->dest, &s->dest)) { if (r->flags && r->flags == s->flags) return r->queue == s->queue; else if (! r->flags) return 1; } return 0; } static int check_and_subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_subscribers *subs, bool is_src, bool exclusive, bool ack) { struct snd_seq_port_subs_info *grp; struct list_head *p; struct snd_seq_subscribers *s; int err; grp = is_src ? 
&port->c_src : &port->c_dest; err = -EBUSY; down_write(&grp->list_mutex); if (exclusive) { if (!list_empty(&grp->list_head)) goto __error; } else { if (grp->exclusive) goto __error; /* check whether already exists */ list_for_each(p, &grp->list_head) { s = get_subscriber(p, is_src); if (match_subs_info(&subs->info, &s->info)) goto __error; } } err = subscribe_port(client, port, grp, &subs->info, ack); if (err < 0) { grp->exclusive = 0; goto __error; } /* add to list */ write_lock_irq(&grp->list_lock); if (is_src) list_add_tail(&subs->src_list, &grp->list_head); else list_add_tail(&subs->dest_list, &grp->list_head); grp->exclusive = exclusive; atomic_inc(&subs->ref_count); write_unlock_irq(&grp->list_lock); err = 0; __error: up_write(&grp->list_mutex); return err; } static void delete_and_unsubscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_subscribers *subs, bool is_src, bool ack) { struct snd_seq_port_subs_info *grp; struct list_head *list; bool empty; grp = is_src ? &port->c_src : &port->c_dest; list = is_src ? 
&subs->src_list : &subs->dest_list; down_write(&grp->list_mutex); write_lock_irq(&grp->list_lock); empty = list_empty(list); if (!empty) list_del_init(list); grp->exclusive = 0; write_unlock_irq(&grp->list_lock); up_write(&grp->list_mutex); if (!empty) unsubscribe_port(client, port, grp, &subs->info, ack); } /* connect two ports */ int snd_seq_port_connect(struct snd_seq_client *connector, struct snd_seq_client *src_client, struct snd_seq_client_port *src_port, struct snd_seq_client *dest_client, struct snd_seq_client_port *dest_port, struct snd_seq_port_subscribe *info) { struct snd_seq_subscribers *subs; bool exclusive; int err; subs = kzalloc(sizeof(*subs), GFP_KERNEL); if (!subs) return -ENOMEM; subs->info = *info; atomic_set(&subs->ref_count, 0); INIT_LIST_HEAD(&subs->src_list); INIT_LIST_HEAD(&subs->dest_list); exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE); err = check_and_subscribe_port(src_client, src_port, subs, true, exclusive, connector->number != src_client->number); if (err < 0) goto error; err = check_and_subscribe_port(dest_client, dest_port, subs, false, exclusive, connector->number != dest_client->number); if (err < 0) goto error_dest; return 0; error_dest: delete_and_unsubscribe_port(src_client, src_port, subs, true, connector->number != src_client->number); error: kfree(subs); return err; } /* remove the connection */ int snd_seq_port_disconnect(struct snd_seq_client *connector, struct snd_seq_client *src_client, struct snd_seq_client_port *src_port, struct snd_seq_client *dest_client, struct snd_seq_client_port *dest_port, struct snd_seq_port_subscribe *info) { struct snd_seq_port_subs_info *src = &src_port->c_src; struct snd_seq_subscribers *subs; int err = -ENOENT; down_write(&src->list_mutex); /* look for the connection */ list_for_each_entry(subs, &src->list_head, src_list) { if (match_subs_info(info, &subs->info)) { atomic_dec(&subs->ref_count); /* mark as not ready */ err = 0; break; } } up_write(&src->list_mutex); if (err < 
0) return err; delete_and_unsubscribe_port(src_client, src_port, subs, true, connector->number != src_client->number); delete_and_unsubscribe_port(dest_client, dest_port, subs, false, connector->number != dest_client->number); kfree(subs); return 0; } /* get matched subscriber */ struct snd_seq_subscribers *snd_seq_port_get_subscription(struct snd_seq_port_subs_info *src_grp, struct snd_seq_addr *dest_addr) { struct snd_seq_subscribers *s, *found = NULL; down_read(&src_grp->list_mutex); list_for_each_entry(s, &src_grp->list_head, src_list) { if (addr_match(dest_addr, &s->info.dest)) { found = s; break; } } up_read(&src_grp->list_mutex); return found; } /* * Attach a device driver that wants to receive events from the * sequencer. Returns the new port number on success. * A driver that wants to receive the events converted to midi, will * use snd_seq_midisynth_register_port(). */ /* exported */ int snd_seq_event_port_attach(int client, struct snd_seq_port_callback *pcbp, int cap, int type, int midi_channels, int midi_voices, char *portname) { struct snd_seq_port_info portinfo; int ret; /* Set up the port */ memset(&portinfo, 0, sizeof(portinfo)); portinfo.addr.client = client; strlcpy(portinfo.name, portname ? portname : "Unamed port", sizeof(portinfo.name)); portinfo.capability = cap; portinfo.type = type; portinfo.kernel = pcbp; portinfo.midi_channels = midi_channels; portinfo.midi_voices = midi_voices; /* Create it */ ret = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, &portinfo); if (ret >= 0) ret = portinfo.addr.port; return ret; } EXPORT_SYMBOL(snd_seq_event_port_attach); /* * Detach the driver from a port. 
*/ /* exported */ int snd_seq_event_port_detach(int client, int port) { struct snd_seq_port_info portinfo; int err; memset(&portinfo, 0, sizeof(portinfo)); portinfo.addr.client = client; portinfo.addr.port = port; err = snd_seq_kernel_client_ctl(client, SNDRV_SEQ_IOCTL_DELETE_PORT, &portinfo); return err; } EXPORT_SYMBOL(snd_seq_event_port_detach);
gpl-2.0
crewrktablets/android_kernel_odys_RK30_3.0.8
drivers/mmc/host/mmci.c
614
33319
/* * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver * * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. * Copyright (C) 2010 ST-Ericsson SA * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/highmem.h> #include <linux/log2.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/amba/bus.h> #include <linux/clk.h> #include <linux/scatterlist.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/amba/mmci.h> #include <asm/div64.h> #include <asm/io.h> #include <asm/sizes.h> #include "mmci.h" #define DRIVER_NAME "mmci-pl18x" static unsigned int fmax = 515633; /** * struct variant_data - MMCI variant-specific quirks * @clkreg: default value for MCICLOCK register * @clkreg_enable: enable value for MMCICLOCK register * @datalength_bits: number of bits in the MMCIDATALENGTH register * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY * is asserted (likewise for RX) * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY * is asserted (likewise for RX) * @sdio: variant supports SDIO * @st_clkdiv: true if using a ST-specific clock divider algorithm * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register */ struct variant_data { unsigned int clkreg; unsigned int clkreg_enable; unsigned int datalength_bits; unsigned int fifosize; unsigned int fifohalfsize; bool sdio; bool st_clkdiv; bool blksz_datactrl16; }; static struct variant_data variant_arm = { .fifosize = 16 * 4, .fifohalfsize = 
8 * 4, .datalength_bits = 16, }; static struct variant_data variant_arm_extended_fifo = { .fifosize = 128 * 4, .fifohalfsize = 64 * 4, .datalength_bits = 16, }; static struct variant_data variant_u300 = { .fifosize = 16 * 4, .fifohalfsize = 8 * 4, .clkreg_enable = MCI_ST_U300_HWFCEN, .datalength_bits = 16, .sdio = true, }; static struct variant_data variant_ux500 = { .fifosize = 30 * 4, .fifohalfsize = 8 * 4, .clkreg = MCI_CLK_ENABLE, .clkreg_enable = MCI_ST_UX500_HWFCEN, .datalength_bits = 24, .sdio = true, .st_clkdiv = true, }; static struct variant_data variant_ux500v2 = { .fifosize = 30 * 4, .fifohalfsize = 8 * 4, .clkreg = MCI_CLK_ENABLE, .clkreg_enable = MCI_ST_UX500_HWFCEN, .datalength_bits = 24, .sdio = true, .st_clkdiv = true, .blksz_datactrl16 = true, }; /* * This must be called with host->lock held */ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) { struct variant_data *variant = host->variant; u32 clk = variant->clkreg; if (desired) { if (desired >= host->mclk) { clk = MCI_CLK_BYPASS; if (variant->st_clkdiv) clk |= MCI_ST_UX500_NEG_EDGE; host->cclk = host->mclk; } else if (variant->st_clkdiv) { /* * DB8500 TRM says f = mclk / (clkdiv + 2) * => clkdiv = (mclk / f) - 2 * Round the divider up so we don't exceed the max * frequency */ clk = DIV_ROUND_UP(host->mclk, desired) - 2; if (clk >= 256) clk = 255; host->cclk = host->mclk / (clk + 2); } else { /* * PL180 TRM says f = mclk / (2 * (clkdiv + 1)) * => clkdiv = mclk / (2 * f) - 1 */ clk = host->mclk / (2 * desired) - 1; if (clk >= 256) clk = 255; host->cclk = host->mclk / (2 * (clk + 1)); } clk |= variant->clkreg_enable; clk |= MCI_CLK_ENABLE; /* This hasn't proven to be worthwhile */ /* clk |= MCI_CLK_PWRSAVE; */ } if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) clk |= MCI_4BIT_BUS; if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) clk |= MCI_ST_8BIT_BUS; writel(clk, host->base + MMCICLOCK); } static void mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) { 
writel(0, host->base + MMCICOMMAND); BUG_ON(host->data); host->mrq = NULL; host->cmd = NULL; /* * Need to drop the host lock here; mmc_request_done may call * back into the driver... */ spin_unlock(&host->lock); mmc_request_done(host->mmc, mrq); spin_lock(&host->lock); } static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) { void __iomem *base = host->base; if (host->singleirq) { unsigned int mask0 = readl(base + MMCIMASK0); mask0 &= ~MCI_IRQ1MASK; mask0 |= mask; writel(mask0, base + MMCIMASK0); } writel(mask, base + MMCIMASK1); } static void mmci_stop_data(struct mmci_host *host) { writel(0, host->base + MMCIDATACTRL); mmci_set_mask1(host, 0); host->data = NULL; } static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) { unsigned int flags = SG_MITER_ATOMIC; if (data->flags & MMC_DATA_READ) flags |= SG_MITER_TO_SG; else flags |= SG_MITER_FROM_SG; sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); } /* * All the DMA operation mode stuff goes inside this ifdef. * This assumes that you have a generic DMA device interface, * no custom DMA interfaces are supported. */ #ifdef CONFIG_DMA_ENGINE static void __devinit mmci_dma_setup(struct mmci_host *host) { struct mmci_platform_data *plat = host->plat; const char *rxname, *txname; dma_cap_mask_t mask; if (!plat || !plat->dma_filter) { dev_info(mmc_dev(host->mmc), "no DMA platform data\n"); return; } /* Try to acquire a generic DMA engine slave channel */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); /* * If only an RX channel is specified, the driver will * attempt to use it bidirectionally, however if it is * is specified but cannot be located, DMA will be disabled. 
*/ if (plat->dma_rx_param) { host->dma_rx_channel = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); /* E.g if no DMA hardware is present */ if (!host->dma_rx_channel) dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); } if (plat->dma_tx_param) { host->dma_tx_channel = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param); if (!host->dma_tx_channel) dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); } else { host->dma_tx_channel = host->dma_rx_channel; } if (host->dma_rx_channel) rxname = dma_chan_name(host->dma_rx_channel); else rxname = "none"; if (host->dma_tx_channel) txname = dma_chan_name(host->dma_tx_channel); else txname = "none"; dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", rxname, txname); /* * Limit the maximum segment size in any SG entry according to * the parameters of the DMA engine device. */ if (host->dma_tx_channel) { struct device *dev = host->dma_tx_channel->device->dev; unsigned int max_seg_size = dma_get_max_seg_size(dev); if (max_seg_size < host->mmc->max_seg_size) host->mmc->max_seg_size = max_seg_size; } if (host->dma_rx_channel) { struct device *dev = host->dma_rx_channel->device->dev; unsigned int max_seg_size = dma_get_max_seg_size(dev); if (max_seg_size < host->mmc->max_seg_size) host->mmc->max_seg_size = max_seg_size; } } /* * This is used in __devinit or __devexit so inline it * so it can be discarded. 
*/ static inline void mmci_dma_release(struct mmci_host *host) { struct mmci_platform_data *plat = host->plat; if (host->dma_rx_channel) dma_release_channel(host->dma_rx_channel); if (host->dma_tx_channel && plat->dma_tx_param) dma_release_channel(host->dma_tx_channel); host->dma_rx_channel = host->dma_tx_channel = NULL; } static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { struct dma_chan *chan = host->dma_current; enum dma_data_direction dir; u32 status; int i; /* Wait up to 1ms for the DMA to complete */ for (i = 0; ; i++) { status = readl(host->base + MMCISTATUS); if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) break; udelay(10); } /* * Check to see whether we still have some data left in the FIFO - * this catches DMA controllers which are unable to monitor the * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- * contiguous buffers. On TX, we'll get a FIFO underrun error. */ if (status & MCI_RXDATAAVLBLMASK) { dmaengine_terminate_all(chan); if (!data->error) data->error = -EIO; } if (data->flags & MMC_DATA_WRITE) { dir = DMA_TO_DEVICE; } else { dir = DMA_FROM_DEVICE; } dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); /* * Use of DMA with scatter-gather is impossible. * Give up with DMA and switch back to PIO mode. */ if (status & MCI_RXDATAAVLBLMASK) { dev_err(mmc_dev(host->mmc), "buggy DMA detected. 
Taking evasive action.\n"); mmci_dma_release(host); } } static void mmci_dma_data_error(struct mmci_host *host) { dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); dmaengine_terminate_all(host->dma_current); } static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) { struct variant_data *variant = host->variant; struct dma_slave_config conf = { .src_addr = host->phybase + MMCIFIFO, .dst_addr = host->phybase + MMCIFIFO, .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ }; struct mmc_data *data = host->data; struct dma_chan *chan; struct dma_device *device; struct dma_async_tx_descriptor *desc; int nr_sg; host->dma_current = NULL; if (data->flags & MMC_DATA_READ) { conf.direction = DMA_FROM_DEVICE; chan = host->dma_rx_channel; } else { conf.direction = DMA_TO_DEVICE; chan = host->dma_tx_channel; } /* If there's no DMA channel, fall back to PIO */ if (!chan) return -EINVAL; /* If less than or equal to the fifo size, don't bother with DMA */ if (host->size <= variant->fifosize) return -EINVAL; device = chan->device; nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction); if (nr_sg == 0) return -EINVAL; dmaengine_slave_config(chan, &conf); desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, conf.direction, DMA_CTRL_ACK); if (!desc) goto unmap_exit; /* Okay, go for it. */ host->dma_current = chan; dev_vdbg(mmc_dev(host->mmc), "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", data->sg_len, data->blksz, data->blocks, data->flags); dmaengine_submit(desc); dma_async_issue_pending(chan); datactrl |= MCI_DPSM_DMAENABLE; /* Trigger the DMA transfer */ writel(datactrl, host->base + MMCIDATACTRL); /* * Let the MMCI say when the data is ended and it's time * to fire next DMA request. 
When that happens, MMCI will * call mmci_data_end() */ writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, host->base + MMCIMASK0); return 0; unmap_exit: dmaengine_terminate_all(chan); dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); return -ENOMEM; } #else /* Blank functions if the DMA engine is not available */ static inline void mmci_dma_setup(struct mmci_host *host) { } static inline void mmci_dma_release(struct mmci_host *host) { } static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { } static inline void mmci_dma_data_error(struct mmci_host *host) { } static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) { return -ENOSYS; } #endif static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) { struct variant_data *variant = host->variant; unsigned int datactrl, timeout, irqmask; unsigned long long clks; void __iomem *base; int blksz_bits; dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n", data->blksz, data->blocks, data->flags); host->data = data; host->size = data->blksz * data->blocks; data->bytes_xfered = 0; clks = (unsigned long long)data->timeout_ns * host->cclk; do_div(clks, 1000000000UL); timeout = data->timeout_clks + (unsigned int)clks; base = host->base; writel(timeout, base + MMCIDATATIMER); writel(host->size, base + MMCIDATALENGTH); blksz_bits = ffs(data->blksz) - 1; BUG_ON(1 << blksz_bits != data->blksz); if (variant->blksz_datactrl16) datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); else datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; if (data->flags & MMC_DATA_READ) datactrl |= MCI_DPSM_DIRECTION; /* * Attempt to use DMA operation mode, if this * should fail, fall back to PIO mode */ if (!mmci_dma_start_data(host, datactrl)) return; /* IRQ mode, map the SG list for CPU reading/writing */ mmci_init_sg(host, data); if (data->flags & MMC_DATA_READ) { irqmask = MCI_RXFIFOHALFFULLMASK; /* * If we have less than the fifo 'half-full' threshold 
to * transfer, trigger a PIO interrupt as soon as any data * is available. */ if (host->size < variant->fifohalfsize) irqmask |= MCI_RXDATAAVLBLMASK; } else { /* * We don't actually need to include "FIFO empty" here * since its implicit in "FIFO half empty". */ irqmask = MCI_TXFIFOHALFEMPTYMASK; } /* The ST Micro variants has a special bit to enable SDIO */ if (variant->sdio && host->mmc->card) if (mmc_card_sdio(host->mmc->card)) datactrl |= MCI_ST_DPSM_SDIOEN; writel(datactrl, base + MMCIDATACTRL); writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); mmci_set_mask1(host, irqmask); } static void mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) { void __iomem *base = host->base; dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n", cmd->opcode, cmd->arg, cmd->flags); if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) { writel(0, base + MMCICOMMAND); udelay(1); } c |= cmd->opcode | MCI_CPSM_ENABLE; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) c |= MCI_CPSM_LONGRSP; c |= MCI_CPSM_RESPONSE; } if (/*interrupt*/0) c |= MCI_CPSM_INTERRUPT; host->cmd = cmd; writel(cmd->arg, base + MMCIARGUMENT); writel(c, base + MMCICOMMAND); } static void mmci_data_irq(struct mmci_host *host, struct mmc_data *data, unsigned int status) { /* First check for errors */ if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { u32 remain, success; /* Terminate the DMA transfer */ if (dma_inprogress(host)) mmci_dma_data_error(host); /* * Calculate how far we are into the transfer. Note that * the data counter gives the number of bytes transferred * on the MMC bus, not on the host side. On reads, this * can be as much as a FIFO-worth of data ahead. This * matters for FIFO overruns only. 
*/ remain = readl(host->base + MMCIDATACNT); success = data->blksz * data->blocks - remain; dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", status, success); if (status & MCI_DATACRCFAIL) { /* Last block was not successful */ success -= 1; data->error = -EILSEQ; } else if (status & MCI_DATATIMEOUT) { data->error = -ETIMEDOUT; } else if (status & MCI_STARTBITERR) { data->error = -ECOMM; } else if (status & MCI_TXUNDERRUN) { data->error = -EIO; } else if (status & MCI_RXOVERRUN) { if (success > host->variant->fifosize) success -= host->variant->fifosize; else success = 0; data->error = -EIO; } data->bytes_xfered = round_down(success, data->blksz); } if (status & MCI_DATABLOCKEND) dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); if (status & MCI_DATAEND || data->error) { if (dma_inprogress(host)) mmci_dma_unmap(host, data); mmci_stop_data(host); if (!data->error) /* The error clause is handled above, success! */ data->bytes_xfered = data->blksz * data->blocks; if (!data->stop) { mmci_request_end(host, data->mrq); } else { mmci_start_command(host, data->stop, 0); } } } static void mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, unsigned int status) { void __iomem *base = host->base; host->cmd = NULL; if (status & MCI_CMDTIMEOUT) { cmd->error = -ETIMEDOUT; } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) { cmd->error = -EILSEQ; } else { cmd->resp[0] = readl(base + MMCIRESPONSE0); cmd->resp[1] = readl(base + MMCIRESPONSE1); cmd->resp[2] = readl(base + MMCIRESPONSE2); cmd->resp[3] = readl(base + MMCIRESPONSE3); } if (!cmd->data || cmd->error) { if (host->data) mmci_stop_data(host); mmci_request_end(host, cmd->mrq); } else if (!(cmd->data->flags & MMC_DATA_READ)) { mmci_start_data(host, cmd->data); } } static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain) { void __iomem *base = host->base; char *ptr = buffer; u32 status; int host_remain = host->size; do { int count = 
host_remain - (readl(base + MMCIFIFOCNT) << 2); if (count > remain) count = remain; if (count <= 0) break; readsl(base + MMCIFIFO, ptr, count >> 2); ptr += count; remain -= count; host_remain -= count; if (remain == 0) break; status = readl(base + MMCISTATUS); } while (status & MCI_RXDATAAVLBL); return ptr - buffer; } static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) { struct variant_data *variant = host->variant; void __iomem *base = host->base; char *ptr = buffer; do { unsigned int count, maxcnt; maxcnt = status & MCI_TXFIFOEMPTY ? variant->fifosize : variant->fifohalfsize; count = min(remain, maxcnt); /* * The ST Micro variant for SDIO transfer sizes * less then 8 bytes should have clock H/W flow * control disabled. */ if (variant->sdio && mmc_card_sdio(host->mmc->card)) { if (count < 8) writel(readl(host->base + MMCICLOCK) & ~variant->clkreg_enable, host->base + MMCICLOCK); else writel(readl(host->base + MMCICLOCK) | variant->clkreg_enable, host->base + MMCICLOCK); } /* * SDIO especially may want to send something that is * not divisible by 4 (as opposed to card sectors * etc), and the FIFO only accept full 32-bit writes. * So compensate by adding +3 on the count, a single * byte become a 32bit write, 7 bytes will be two * 32bit writes etc. */ writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); ptr += count; remain -= count; if (remain == 0) break; status = readl(base + MMCISTATUS); } while (status & MCI_TXFIFOHALFEMPTY); return ptr - buffer; } /* * PIO data transfer IRQ handler. 
*/ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; struct sg_mapping_iter *sg_miter = &host->sg_miter; struct variant_data *variant = host->variant; void __iomem *base = host->base; unsigned long flags; u32 status; status = readl(base + MMCISTATUS); dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); local_irq_save(flags); do { unsigned int remain, len; char *buffer; /* * For write, we only need to test the half-empty flag * here - if the FIFO is completely empty, then by * definition it is more than half empty. * * For read, check for data available. */ if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL))) break; if (!sg_miter_next(sg_miter)) break; buffer = sg_miter->addr; remain = sg_miter->length; len = 0; if (status & MCI_RXACTIVE) len = mmci_pio_read(host, buffer, remain); if (status & MCI_TXACTIVE) len = mmci_pio_write(host, buffer, remain, status); sg_miter->consumed = len; host->size -= len; remain -= len; if (remain) break; status = readl(base + MMCISTATUS); } while (1); sg_miter_stop(sg_miter); local_irq_restore(flags); /* * If we have less than the fifo 'half-full' threshold to transfer, * trigger a PIO interrupt as soon as any data is available. */ if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); /* * If we run out of data, disable the data IRQs; this * prevents a race where the FIFO becomes empty before * the chip itself has disabled the data path, and * stops us racing with our data end IRQ. */ if (host->size == 0) { mmci_set_mask1(host, 0); writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0); } return IRQ_HANDLED; } /* * Handle completion of command and data transfers. 
*/ static irqreturn_t mmci_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; u32 status; int ret = 0; spin_lock(&host->lock); do { struct mmc_command *cmd; struct mmc_data *data; status = readl(host->base + MMCISTATUS); if (host->singleirq) { if (status & readl(host->base + MMCIMASK1)) mmci_pio_irq(irq, dev_id); status &= ~MCI_IRQ1MASK; } status &= readl(host->base + MMCIMASK0); writel(status, host->base + MMCICLEAR); dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); data = host->data; if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data) mmci_data_irq(host, data, status); cmd = host->cmd; if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd) mmci_cmd_irq(host, cmd, status); ret = 1; } while (status); spin_unlock(&host->lock); return IRQ_RETVAL(ret); } static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct mmci_host *host = mmc_priv(mmc); unsigned long flags; WARN_ON(host->mrq != NULL); if (mrq->data && !is_power_of_2(mrq->data->blksz)) { dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", mrq->data->blksz); mrq->cmd->error = -EINVAL; mmc_request_done(mmc, mrq); return; } spin_lock_irqsave(&host->lock, flags); host->mrq = mrq; if (mrq->data && mrq->data->flags & MMC_DATA_READ) mmci_start_data(host, mrq->data); mmci_start_command(host, mrq->cmd, 0); spin_unlock_irqrestore(&host->lock, flags); } static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mmci_host *host = mmc_priv(mmc); u32 pwr = 0; unsigned long flags; int ret; switch (ios->power_mode) { case MMC_POWER_OFF: if (host->vcc) ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); break; case MMC_POWER_UP: if (host->vcc) { ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); if (ret) { dev_err(mmc_dev(mmc), "unable to set OCR\n"); /* * The .set_ios() function in the mmc_host_ops * struct return void, and failing to set the * power should be rare so 
we print an error * and return here. */ return; } } if (host->plat->vdd_handler) pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, ios->power_mode); /* The ST version does not have this, fall through to POWER_ON */ if (host->hw_designer != AMBA_VENDOR_ST) { pwr |= MCI_PWR_UP; break; } case MMC_POWER_ON: pwr |= MCI_PWR_ON; break; } if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { if (host->hw_designer != AMBA_VENDOR_ST) pwr |= MCI_ROD; else { /* * The ST Micro variant use the ROD bit for something * else and only has OD (Open Drain). */ pwr |= MCI_OD; } } spin_lock_irqsave(&host->lock, flags); mmci_set_clkreg(host, ios->clock); if (host->pwr != pwr) { host->pwr = pwr; writel(pwr, host->base + MMCIPOWER); } spin_unlock_irqrestore(&host->lock, flags); } static int mmci_get_ro(struct mmc_host *mmc) { struct mmci_host *host = mmc_priv(mmc); if (host->gpio_wp == -ENOSYS) return -ENOSYS; return gpio_get_value_cansleep(host->gpio_wp); } static int mmci_get_cd(struct mmc_host *mmc) { struct mmci_host *host = mmc_priv(mmc); struct mmci_platform_data *plat = host->plat; unsigned int status; if (host->gpio_cd == -ENOSYS) { if (!plat->status) return 1; /* Assume always present */ status = plat->status(mmc_dev(host->mmc)); } else status = !!gpio_get_value_cansleep(host->gpio_cd) ^ plat->cd_invert; /* * Use positive logic throughout - status is zero for no card, * non-zero for card inserted. 
*/ return status; } static irqreturn_t mmci_cd_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; mmc_detect_change(host->mmc, msecs_to_jiffies(500)); return IRQ_HANDLED; } static const struct mmc_host_ops mmci_ops = { .request = mmci_request, .set_ios = mmci_set_ios, .get_ro = mmci_get_ro, .get_cd = mmci_get_cd, }; static int __devinit mmci_probe(struct amba_device *dev, const struct amba_id *id) { struct mmci_platform_data *plat = dev->dev.platform_data; struct variant_data *variant = id->data; struct mmci_host *host; struct mmc_host *mmc; int ret; /* must have platform data */ if (!plat) { ret = -EINVAL; goto out; } ret = amba_request_regions(dev, DRIVER_NAME); if (ret) goto out; mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); if (!mmc) { ret = -ENOMEM; goto rel_regions; } host = mmc_priv(mmc); host->mmc = mmc; host->gpio_wp = -ENOSYS; host->gpio_cd = -ENOSYS; host->gpio_cd_irq = -1; host->hw_designer = amba_manf(dev); host->hw_revision = amba_rev(dev); dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); host->clk = clk_get(&dev->dev, NULL); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); host->clk = NULL; goto host_free; } ret = clk_enable(host->clk); if (ret) goto clk_free; host->plat = plat; host->variant = variant; host->mclk = clk_get_rate(host->clk); /* * According to the spec, mclk is max 100 MHz, * so we try to adjust the clock down to this, * (if possible). 
*/ if (host->mclk > 100000000) { ret = clk_set_rate(host->clk, 100000000); if (ret < 0) goto clk_disable; host->mclk = clk_get_rate(host->clk); dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", host->mclk); } host->phybase = dev->res.start; host->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!host->base) { ret = -ENOMEM; goto clk_disable; } mmc->ops = &mmci_ops; mmc->f_min = (host->mclk + 511) / 512; /* * If the platform data supplies a maximum operating * frequency, this takes precedence. Else, we fall back * to using the module parameter, which has a (low) * default value in case it is not specified. Either * value must not exceed the clock rate into the block, * of course. */ if (plat->f_max) mmc->f_max = min(host->mclk, plat->f_max); else mmc->f_max = min(host->mclk, fmax); dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); #ifdef CONFIG_REGULATOR /* If we're using the regulator framework, try to fetch a regulator */ host->vcc = regulator_get(&dev->dev, "vmmc"); if (IS_ERR(host->vcc)) host->vcc = NULL; else { int mask = mmc_regulator_get_ocrmask(host->vcc); if (mask < 0) dev_err(&dev->dev, "error getting OCR mask (%d)\n", mask); else { host->mmc->ocr_avail = (u32) mask; if (plat->ocr_mask) dev_warn(&dev->dev, "Provided ocr_mask/setpower will not be used " "(using regulator instead)\n"); } } #endif /* Fall back to platform data if no regulator is found */ if (host->vcc == NULL) mmc->ocr_avail = plat->ocr_mask; mmc->caps = plat->capabilities; /* * We can do SGIO */ mmc->max_segs = NR_SG; /* * Since only a certain number of bits are valid in the data length * register, we must ensure that we don't exceed 2^num-1 bytes in a * single request. */ mmc->max_req_size = (1 << variant->datalength_bits) - 1; /* * Set the maximum segment size. Since we aren't doing DMA * (yet) we are only limited by the data length register. */ mmc->max_seg_size = mmc->max_req_size; /* * Block size can be up to 2048 bytes, but must be a power of two. 
*/ mmc->max_blk_size = 2048; /* * No limit on the number of blocks transferred. */ mmc->max_blk_count = mmc->max_req_size; spin_lock_init(&host->lock); writel(0, host->base + MMCIMASK0); writel(0, host->base + MMCIMASK1); writel(0xfff, host->base + MMCICLEAR); if (gpio_is_valid(plat->gpio_cd)) { ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); if (ret == 0) ret = gpio_direction_input(plat->gpio_cd); if (ret == 0) host->gpio_cd = plat->gpio_cd; else if (ret != -ENOSYS) goto err_gpio_cd; /* * A gpio pin that will detect cards when inserted and removed * will most likely want to trigger on the edges if it is * 0 when ejected and 1 when inserted (or mutatis mutandis * for the inverted case) so we request triggers on both * edges. */ ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), mmci_cd_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, DRIVER_NAME " (cd)", host); if (ret >= 0) host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); } if (gpio_is_valid(plat->gpio_wp)) { ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); if (ret == 0) ret = gpio_direction_input(plat->gpio_wp); if (ret == 0) host->gpio_wp = plat->gpio_wp; else if (ret != -ENOSYS) goto err_gpio_wp; } if ((host->plat->status || host->gpio_cd != -ENOSYS) && host->gpio_cd_irq < 0) mmc->caps |= MMC_CAP_NEEDS_POLL; ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) goto unmap; if (dev->irq[1] == NO_IRQ) host->singleirq = true; else { ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) goto irq0_free; } writel(MCI_IRQENABLE, host->base + MMCIMASK0); amba_set_drvdata(dev, mmc); dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", mmc_hostname(mmc), amba_part(dev), amba_manf(dev), amba_rev(dev), (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); mmci_dma_setup(host); mmc_add_host(mmc); return 0; irq0_free: free_irq(dev->irq[0], host); unmap: if (host->gpio_wp != -ENOSYS) 
gpio_free(host->gpio_wp); err_gpio_wp: if (host->gpio_cd_irq >= 0) free_irq(host->gpio_cd_irq, host); if (host->gpio_cd != -ENOSYS) gpio_free(host->gpio_cd); err_gpio_cd: iounmap(host->base); clk_disable: clk_disable(host->clk); clk_free: clk_put(host->clk); host_free: mmc_free_host(mmc); rel_regions: amba_release_regions(dev); out: return ret; } static int __devexit mmci_remove(struct amba_device *dev) { struct mmc_host *mmc = amba_get_drvdata(dev); amba_set_drvdata(dev, NULL); if (mmc) { struct mmci_host *host = mmc_priv(mmc); mmc_remove_host(mmc); writel(0, host->base + MMCIMASK0); writel(0, host->base + MMCIMASK1); writel(0, host->base + MMCICOMMAND); writel(0, host->base + MMCIDATACTRL); mmci_dma_release(host); free_irq(dev->irq[0], host); if (!host->singleirq) free_irq(dev->irq[1], host); if (host->gpio_wp != -ENOSYS) gpio_free(host->gpio_wp); if (host->gpio_cd_irq >= 0) free_irq(host->gpio_cd_irq, host); if (host->gpio_cd != -ENOSYS) gpio_free(host->gpio_cd); iounmap(host->base); clk_disable(host->clk); clk_put(host->clk); if (host->vcc) mmc_regulator_set_ocr(mmc, host->vcc, 0); regulator_put(host->vcc); mmc_free_host(mmc); amba_release_regions(dev); } return 0; } #ifdef CONFIG_PM static int mmci_suspend(struct amba_device *dev, pm_message_t state) { struct mmc_host *mmc = amba_get_drvdata(dev); int ret = 0; if (mmc) { struct mmci_host *host = mmc_priv(mmc); ret = mmc_suspend_host(mmc); if (ret == 0) writel(0, host->base + MMCIMASK0); } return ret; } static int mmci_resume(struct amba_device *dev) { struct mmc_host *mmc = amba_get_drvdata(dev); int ret = 0; if (mmc) { struct mmci_host *host = mmc_priv(mmc); writel(MCI_IRQENABLE, host->base + MMCIMASK0); ret = mmc_resume_host(mmc); } return ret; } #else #define mmci_suspend NULL #define mmci_resume NULL #endif static struct amba_id mmci_ids[] = { { .id = 0x00041180, .mask = 0xff0fffff, .data = &variant_arm, }, { .id = 0x01041180, .mask = 0xff0fffff, .data = &variant_arm_extended_fifo, }, { .id = 0x00041181, 
.mask = 0x000fffff, .data = &variant_arm, }, /* ST Micro variants */ { .id = 0x00180180, .mask = 0x00ffffff, .data = &variant_u300, }, { .id = 0x00280180, .mask = 0x00ffffff, .data = &variant_u300, }, { .id = 0x00480180, .mask = 0xf0ffffff, .data = &variant_ux500, }, { .id = 0x10480180, .mask = 0xf0ffffff, .data = &variant_ux500v2, }, { 0, 0 }, }; static struct amba_driver mmci_driver = { .drv = { .name = DRIVER_NAME, }, .probe = mmci_probe, .remove = __devexit_p(mmci_remove), .suspend = mmci_suspend, .resume = mmci_resume, .id_table = mmci_ids, }; static int __init mmci_init(void) { return amba_driver_register(&mmci_driver); } static void __exit mmci_exit(void) { amba_driver_unregister(&mmci_driver); } module_init(mmci_init); module_exit(mmci_exit); module_param(fmax, uint, 0444); MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); MODULE_LICENSE("GPL");
gpl-2.0
ErikAndren/linux
drivers/net/ethernet/packetengines/hamachi.c
614
64048
/* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */ /* Written 1998-2000 by Donald Becker. Updates 2000 by Keith Underwood. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet adapter. Support and updates available at http://www.scyld.com/network/hamachi.html [link no longer provides useful info -jgarzik] or http://www.parl.clemson.edu/~keithu/hamachi.html */ #define DRV_NAME "hamachi" #define DRV_VERSION "2.1" #define DRV_RELDATE "Sept 11, 2006" /* A few user-configurable values. */ static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ #define final_version #define hamachi_debug debug /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 40; static int mtu; /* Default values selected by testing on a dual processor PIII-450 */ /* These six interrupt control parameters may be set directly when loading the * module, or through the rx_params and tx_params variables */ static int max_rx_latency = 0x11; static int max_rx_gap = 0x05; static int min_rx_pkt = 0x18; static int max_tx_latency = 0x00; static int max_tx_gap = 0x00; static int min_tx_pkt = 0x30; /* Set the copy breakpoint for the copy-only-tiny-frames scheme. -Setting to > 1518 causes all frames to be copied -Setting to 0 disables copies */ static int rx_copybreak; /* An override for the hardware detection of bus width. Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit. 
Add 2 to disable parity detection. */ static int force32; /* Used to pass the media type, etc. These exist for driver interoperability. No media types are currently defined. - The lower 4 bits are reserved for the media type. - The next three bits may be set to one of the following: 0x00000000 : Autodetect PCI bus 0x00000010 : Force 32 bit PCI bus 0x00000020 : Disable parity detection 0x00000040 : Force 64 bit PCI bus Default is autodetect - The next bit can be used to force half-duplex. This is a bad idea since no known implementations implement half-duplex, and, in general, half-duplex for gigabit ethernet is a bad idea. 0x00000080 : Force half-duplex Default is full-duplex. - In the original driver, the ninth bit could be used to force full-duplex. Maintain that for compatibility 0x00000200 : Force full-duplex */ #define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* The Hamachi chipset supports 3 parameters each for Rx and Tx * interruput management. Parameters will be loaded as specified into * the TxIntControl and RxIntControl registers. * * The registers are arranged as follows: * 23 - 16 15 - 8 7 - 0 * _________________________________ * | min_pkt | max_gap | max_latency | * --------------------------------- * min_pkt : The minimum number of packets processed between * interrupts. * max_gap : The maximum inter-packet gap in units of 8.192 us * max_latency : The absolute time between interrupts in units of 8.192 us * */ static int rx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* Operational parameters that are set at compile time. */ /* Keep the ring sizes a power of two for compile efficiency. The compiler will convert <unsigned>'%'<2^N> into a bit mask. 
Making the Tx ring too large decreases the effectiveness of channel bonding and packet priority. There are no ill effects from too-large receive rings, except for excessive memory usage */ /* Empirically it appears that the Tx ring needs to be a little bigger for these Gbit adapters or you get into an overrun condition really easily. Also, things appear to work a bit better in back-to-back configurations if the Rx ring is 8 times the size of the Tx ring */ #define TX_RING_SIZE 64 #define RX_RING_SIZE 512 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc) #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct hamachi_desc) /* * Enable netdev_ioctl. Added interrupt coalescing parameter adjustment. * 2/19/99 Pete Wyckoff <wyckoff@ca.sandia.gov> */ /* play with 64-bit addrlen; seems to be a teensy bit slower --pw */ /* #define ADDRLEN 64 */ /* * RX_CHECKSUM turns on card-generated receive checksum generation for * TCP and UDP packets. Otherwise the upper layers do the calculation. * 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov> */ #define RX_CHECKSUM /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (5*HZ) #include <linux/capability.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/time.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/delay.h> #include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/processor.h> /* Processor type for cache alignment. 
*/ #include <asm/io.h> #include <asm/unaligned.h> #include <asm/cache.h> static const char version[] = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" " Some modifications by Eric kasten <kasten@nscl.msu.edu>\n" " Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n"; /* IP_MF appears to be only defined in <netinet/ip.h>, however, we need it for hardware checksumming support. FYI... some of the definitions in <netinet/ip.h> conflict/duplicate those in other linux headers causing many compiler warnings. */ #ifndef IP_MF #define IP_MF 0x2000 /* IP more frags from <netinet/ip.h> */ #endif /* Define IP_OFFSET to be IPOPT_OFFSET */ #ifndef IP_OFFSET #ifdef IPOPT_OFFSET #define IP_OFFSET IPOPT_OFFSET #else #define IP_OFFSET 2 #endif #endif #define RUN_AT(x) (jiffies + (x)) #ifndef ADDRLEN #define ADDRLEN 32 #endif /* Condensed bus+endian portability operations. */ #if ADDRLEN == 64 #define cpu_to_leXX(addr) cpu_to_le64(addr) #define leXX_to_cpu(addr) le64_to_cpu(addr) #else #define cpu_to_leXX(addr) cpu_to_le32(addr) #define leXX_to_cpu(addr) le32_to_cpu(addr) #endif /* Theory of Operation I. Board Compatibility This device driver is designed for the Packet Engines "Hamachi" Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit 66Mhz PCI card. II. Board-specific settings No jumpers exist on the board. The chip supports software correction of various motherboard wiring errors, however this driver does not support that feature. III. Driver operation IIIa. Ring buffers The Hamachi uses a typical descriptor based bus-master architecture. The descriptor list is similar to that used by the Digital Tulip. This driver uses two statically allocated fixed-size descriptor lists formed into rings by a branch from the final descriptor to the beginning of the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. 
This driver uses a zero-copy receive and transmit scheme similar to my other network drivers. The driver allocates full frame size skbuffs for the Rx ring buffers at open() time and passes the skb->data field to the Hamachi as receive data buffers. When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is copied to the new skbuff. When the incoming frame is larger, the skbuff is passed directly up the protocol stack and replaced by a newly allocated skbuff. The RX_COPYBREAK value is chosen to trade-off the memory wasted by using a full-sized skbuff for small frames vs. the copying costs of larger frames. Gigabit cards are typically used on generously configured machines and the underfilled buffers have negligible impact compared to the benefit of a single allocation size, so the default value of zero results in never copying packets. IIIb/c. Transmit/Receive Structure The Rx and Tx descriptor structure are straight-forward, with no historical baggage that must be explained. Unlike the awkward DBDMA structure, there are no unused fields or option bits that had only one allowable setting. Two details should be noted about the descriptors: The chip supports both 32 bit and 64 bit address structures, and the length field is overwritten on the receive descriptors. The descriptor length is set in the control word for each channel. The development driver uses 32 bit addresses only, however 64 bit addresses may be enabled for 64 bit architectures e.g. the Alpha. IIId. Synchronization This driver is very similar to my other network drivers. The driver runs as two independent, single-threaded flows of control. One is the send-packet routine, which enforces single-threaded use by the dev->tbusy flag. The other thread is the interrupt handler, which is single threaded by the hardware and other software. The send packet thread has partial control over the Tx ring and 'dev->tbusy' flag.
It sets the tbusy flag whenever it's queuing a Tx packet. If the next queue slot is empty, it clears the tbusy flag when finished otherwise it sets the 'hmp->tx_full' flag. The interrupt handler has exclusive control over the Rx ring and records stats from the Tx ring. After reaping the stats, it marks the Tx queue entry as empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it clears both the tx_full and tbusy flags. IV. Notes Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards. IVb. References Hamachi Engineering Design Specification, 5/15/97 (Note: This version was marked "Confidential".) IVc. Errata None noted. V. Recent Changes 01/15/1999 EPK Enlargement of the TX and RX ring sizes. This appears to help avoid some stall conditions -- this needs further research. 01/15/1999 EPK Creation of the hamachi_tx function. This function cleans the Tx ring and is called from hamachi_start_xmit (this used to be called from hamachi_interrupt but it tends to delay execution of the interrupt handler and thus reduce bandwidth by reducing the latency between hamachi_rx()'s). Notably, some modification has been made so that the cleaning loop checks only to make sure that the DescOwn bit isn't set in the status flag since the card is not required to set the entire flag to zero after processing. 01/15/1999 EPK In the hamachi_start_tx function, the Tx ring full flag is checked before attempting to add a buffer to the ring. If the ring is full an attempt is made to free any dirty buffers and thus find space for the new buffer or the function returns non-zero which should cause the scheduler to reschedule the buffer later. 01/15/1999 EPK Some adjustments were made to the chip initialization. End-to-end flow control should now be fully active and the interrupt algorithm vars have been changed. These could probably use further tuning. 01/15/1999 EPK Added the max_{rx,tx}_latency options.
These are used to set the rx and tx latencies for the Hamachi interrupts. If you're having problems with network stalls, try setting these to higher values. Valid values are 0x00 through 0xff. 01/15/1999 EPK In general, the overall bandwidth has increased and latencies are better (sometimes by a factor of 2). Stalls are rare at this point, however there still appears to be a bug somewhere between the hardware and driver. TCP checksum errors under load also appear to be eliminated at this point. 01/18/1999 EPK Ensured that the DescEndRing bit was being set on both the Rx and Tx rings. This appears to have been affecting whether a particular peer-to-peer connection would hang under high load. I believe the Rx ring was typically getting set correctly, but the Tx ring wasn't getting the DescEndRing bit set during initialization. ??? Does this mean the hamachi card is using the DescEndRing in processing even if a particular slot isn't in use -- hypothetically, the card might be searching the entire Tx ring for slots with the DescOwn bit set and then processing them. If the DescEndRing bit isn't set, then it might just wander off through memory until it hits a chunk of data with that bit set and then looping back. 02/09/1999 EPK Added Michel Mueller's TxDMA Interrupt and Tx-timeout problem (TxCmd and RxCmd need only to be set when idle or stopped.) 02/09/1999 EPK Added code to check/reset dev->tbusy in hamachi_interrupt. (Michel Mueller pointed out the ``permanently busy'' potential problem here). 02/22/1999 EPK Added Pete Wyckoff's ioctl to control the Tx/Rx latencies. 02/23/1999 EPK Verified that the interrupt status field bits for Tx were incorrectly defined and corrected (as per Michel Mueller). 02/23/1999 EPK Corrected the Tx full check to check that at least 4 slots were available before resetting the tbusy and tx_full flags (as per Michel Mueller). 03/11/1999 EPK Added Pete Wyckoff's hardware checksumming support.
12/31/1999 KDU Cleaned up assorted things and added Don's code to force 32 bit. 02/20/2000 KDU Some of the control was just plain odd. Cleaned up the hamachi_start_xmit() and hamachi_interrupt() code. There is still some re-structuring I would like to do. 03/01/2000 KDU Experimenting with a WIDE range of interrupt mitigation parameters on a dual P3-450 setup yielded the new default interrupt mitigation parameters. Tx should interrupt VERY infrequently due to Eric's scheme. Rx should be more often... 03/13/2000 KDU Added a patch to make the Rx Checksum code interact nicely with non-linux machines. 03/13/2000 KDU Experimented with some of the configuration values: -It seems that enabling PCI performance commands for descriptors (changing RxDMACtrl and TxDMACtrl lower nibble from 5 to D) has minimal performance impact for any of my tests. (ttcp, netpipe, netperf) I will leave them that way until I hear further feedback. -Increasing the PCI_LATENCY_TIMER to 130 (2 + (burst size of 128 * (0 wait states + 1))) seems to slightly degrade performance. Leaving default at 64 pending further information. 03/14/2000 KDU Further tuning: -adjusted boguscnt in hamachi_rx() to depend on interrupt mitigation parameters chosen. -Selected a set of interrupt parameters based on some extensive testing. These may change with more testing. TO DO: -Consider borrowing from the acenic driver code to check PCI_COMMAND for PCI_COMMAND_INVALIDATE. Set maximum burst size to cache line size in that case. -fix the reset procedure. It doesn't quite work. */ /* A few values that may be tweaked. */ /* Size of each temporary Rx buffer, calculated as: * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum */ #define PKT_BUF_SZ 1536 /* For now, this is going to be set to the maximum size of an ethernet * packet. 
Eventually, we may want to make it a variable that is * related to the MTU */ #define MAX_FRAME_SIZE 1518 /* The rest of these values should never change. */ static void hamachi_timer(unsigned long data); enum capability_flags {CanHaveMII=1, }; static const struct chip_info { u16 vendor_id, device_id, device_id_mask, pad; const char *name; void (*media_timer)(unsigned long data); int flags; } chip_tbl[] = { {0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0}, {0,}, }; /* Offsets to the Hamachi registers. Various sizes. */ enum hamachi_offsets { TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10, RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30, PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B, LEDCtrl=0x06C, VirtualJumpers=0x06D, GPIO=0x6E, TxChecksum=0x074, RxChecksum=0x076, TxIntrCtrl=0x078, RxIntrCtrl=0x07C, InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088, EventStatus=0x08C, MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4, /* See enum MII_offsets below. */ MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE, AddrMode=0x0D0, StationAddr=0x0D2, /* Gigabit AutoNegotiation. */ ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8, ANLinkPartnerAbility=0x0EA, EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2, FIFOcfg=0x0F8, }; /* Offsets to the MII-mode registers. */ enum MII_offsets { MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC, MII_Status=0xAE, }; /* Bits in the interrupt status/mask registers. */ enum intr_status_bits { IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04, IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400, LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, }; /* The Hamachi Rx and Tx buffer descriptors. 
*/ struct hamachi_desc { __le32 status_n_length; #if ADDRLEN == 64 u32 pad; __le64 addr; #else __le32 addr; #endif }; /* Bits in hamachi_desc.status_n_length */ enum desc_status_bits { DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000, DescIntr=0x10000000, }; #define PRIV_ALIGN 15 /* Required alignment mask */ #define MII_CNT 4 struct hamachi_private { /* Descriptor rings first for alignment. Tx requires a second descriptor for status. */ struct hamachi_desc *rx_ring; struct hamachi_desc *tx_ring; struct sk_buff* rx_skbuff[RX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; dma_addr_t tx_ring_dma; dma_addr_t rx_ring_dma; struct timer_list timer; /* Media selection timer. */ /* Frequently used and paired value: keep adjacent for cache effect. */ spinlock_t lock; int chip_id; unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ unsigned int cur_tx, dirty_tx; unsigned int rx_buf_sz; /* Based on MTU+slack. */ unsigned int tx_full:1; /* The Tx queue is full. */ unsigned int duplex_lock:1; unsigned int default_port:4; /* Last dev->if_port value. */ /* MII transceiver section. */ int mii_cnt; /* MII device addresses. */ struct mii_if_info mii_if; /* MII lib hooks/info */ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. 
*/ u32 rx_int_var, tx_int_var; /* interrupt control variables */ u32 option; /* Hold on to a copy of the options */ struct pci_dev *pci_dev; void __iomem *base; }; MODULE_AUTHOR("Donald Becker <becker@scyld.com>, Eric Kasten <kasten@nscl.msu.edu>, Keith Underwood <keithu@parl.clemson.edu>"); MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver"); MODULE_LICENSE("GPL"); module_param(max_interrupt_work, int, 0); module_param(mtu, int, 0); module_param(debug, int, 0); module_param(min_rx_pkt, int, 0); module_param(max_rx_gap, int, 0); module_param(max_rx_latency, int, 0); module_param(min_tx_pkt, int, 0); module_param(max_tx_gap, int, 0); module_param(max_tx_latency, int, 0); module_param(rx_copybreak, int, 0); module_param_array(rx_params, int, NULL, 0); module_param_array(tx_params, int, NULL, 0); module_param_array(options, int, NULL, 0); module_param_array(full_duplex, int, NULL, 0); module_param(force32, int, 0); MODULE_PARM_DESC(max_interrupt_work, "GNIC-II maximum events handled per interrupt"); MODULE_PARM_DESC(mtu, "GNIC-II MTU (all boards)"); MODULE_PARM_DESC(debug, "GNIC-II debug level (0-7)"); MODULE_PARM_DESC(min_rx_pkt, "GNIC-II minimum Rx packets processed between interrupts"); MODULE_PARM_DESC(max_rx_gap, "GNIC-II maximum Rx inter-packet gap in 8.192 microsecond units"); MODULE_PARM_DESC(max_rx_latency, "GNIC-II time between Rx interrupts in 8.192 microsecond units"); MODULE_PARM_DESC(min_tx_pkt, "GNIC-II minimum Tx packets processed between interrupts"); MODULE_PARM_DESC(max_tx_gap, "GNIC-II maximum Tx inter-packet gap in 8.192 microsecond units"); MODULE_PARM_DESC(max_tx_latency, "GNIC-II time between Tx interrupts in 8.192 microsecond units"); MODULE_PARM_DESC(rx_copybreak, "GNIC-II copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(rx_params, "GNIC-II min_rx_pkt+max_rx_gap+max_rx_latency"); MODULE_PARM_DESC(tx_params, "GNIC-II min_tx_pkt+max_tx_gap+max_tx_latency"); MODULE_PARM_DESC(options, "GNIC-II Bits 0-3: 
media type, bits 4-6: as force32, bit 7: half duplex, bit 9 full duplex"); MODULE_PARM_DESC(full_duplex, "GNIC-II full duplex setting(s) (1)"); MODULE_PARM_DESC(force32, "GNIC-II: Bit 0: 32 bit PCI, bit 1: disable parity, bit 2: 64 bit PCI (all boards)"); static int read_eeprom(void __iomem *ioaddr, int location); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int hamachi_open(struct net_device *dev); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void hamachi_timer(unsigned long data); static void hamachi_tx_timeout(struct net_device *dev); static void hamachi_init_ring(struct net_device *dev); static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t hamachi_interrupt(int irq, void *dev_instance); static int hamachi_rx(struct net_device *dev); static inline int hamachi_tx(struct net_device *dev); static void hamachi_error(struct net_device *dev, int intr_status); static int hamachi_close(struct net_device *dev); static struct net_device_stats *hamachi_get_stats(struct net_device *dev); static void set_rx_mode(struct net_device *dev); static const struct ethtool_ops ethtool_ops; static const struct ethtool_ops ethtool_ops_no_mii; static const struct net_device_ops hamachi_netdev_ops = { .ndo_open = hamachi_open, .ndo_stop = hamachi_close, .ndo_start_xmit = hamachi_start_xmit, .ndo_get_stats = hamachi_get_stats, .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_tx_timeout = hamachi_tx_timeout, .ndo_do_ioctl = netdev_ioctl, }; static int hamachi_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct hamachi_private *hmp; int option, i, rx_int_var, tx_int_var, boguscnt; int chip_id = ent->driver_data; int irq; void __iomem *ioaddr; unsigned long base; static 
int card_idx; struct net_device *dev; void *ring_space; dma_addr_t ring_dma; int ret = -ENOMEM; /* when built into the kernel, we only print version if device is found */ #ifndef MODULE static int printed_version; if (!printed_version++) printk(version); #endif if (pci_enable_device(pdev)) { ret = -EIO; goto err_out; } base = pci_resource_start(pdev, 0); #ifdef __alpha__ /* Really "64 bit addrs" */ base |= (pci_resource_start(pdev, 1) << 32); #endif pci_set_master(pdev); i = pci_request_regions(pdev, DRV_NAME); if (i) return i; irq = pdev->irq; ioaddr = ioremap(base, 0x400); if (!ioaddr) goto err_out_release; dev = alloc_etherdev(sizeof(struct hamachi_private)); if (!dev) goto err_out_iounmap; SET_NETDEV_DEV(dev, &pdev->dev); for (i = 0; i < 6; i++) dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i) : readb(ioaddr + StationAddr + i); #if ! defined(final_version) if (hamachi_debug > 4) for (i = 0; i < 0x10; i++) printk("%2.2x%s", read_eeprom(ioaddr, i), i % 16 != 15 ? " " : "\n"); #endif hmp = netdev_priv(dev); spin_lock_init(&hmp->lock); hmp->mii_if.dev = dev; hmp->mii_if.mdio_read = mdio_read; hmp->mii_if.mdio_write = mdio_write; hmp->mii_if.phy_id_mask = 0x1f; hmp->mii_if.reg_num_mask = 0x1f; ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_cleardev; hmp->tx_ring = ring_space; hmp->tx_ring_dma = ring_dma; ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_unmap_tx; hmp->rx_ring = ring_space; hmp->rx_ring_dma = ring_dma; /* Check for options being passed in */ option = card_idx < MAX_UNITS ? options[card_idx] : 0; if (dev->mem_start) option = dev->mem_start; /* If the bus size is misidentified, do the following. */ force32 = force32 ? force32 : ((option >= 0) ? ((option & 0x00000070) >> 4) : 0 ); if (force32) writeb(force32, ioaddr + VirtualJumpers); /* Hmmm, do we really need to reset the chip???. 
*/ writeb(0x01, ioaddr + ChipReset); /* After a reset, the clock speed measurement of the PCI bus will not * be valid for a moment. Wait for a little while until it is. If * it takes more than 10ms, forget it. */ udelay(10); i = readb(ioaddr + PCIClkMeas); for (boguscnt = 0; (!(i & 0x080)) && boguscnt < 1000; boguscnt++){ udelay(10); i = readb(ioaddr + PCIClkMeas); } hmp->base = ioaddr; pci_set_drvdata(pdev, dev); hmp->chip_id = chip_id; hmp->pci_dev = pdev; /* The lower four bits are the media type. */ if (option > 0) { hmp->option = option; if (option & 0x200) hmp->mii_if.full_duplex = 1; else if (option & 0x080) hmp->mii_if.full_duplex = 0; hmp->default_port = option & 15; if (hmp->default_port) hmp->mii_if.force_media = 1; } if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0) hmp->mii_if.full_duplex = 1; /* lock the duplex mode if someone specified a value */ if (hmp->mii_if.full_duplex || (option & 0x080)) hmp->duplex_lock = 1; /* Set interrupt tuning parameters */ max_rx_latency = max_rx_latency & 0x00ff; max_rx_gap = max_rx_gap & 0x00ff; min_rx_pkt = min_rx_pkt & 0x00ff; max_tx_latency = max_tx_latency & 0x00ff; max_tx_gap = max_tx_gap & 0x00ff; min_tx_pkt = min_tx_pkt & 0x00ff; rx_int_var = card_idx < MAX_UNITS ? rx_params[card_idx] : -1; tx_int_var = card_idx < MAX_UNITS ? tx_params[card_idx] : -1; hmp->rx_int_var = rx_int_var >= 0 ? rx_int_var : (min_rx_pkt << 16 | max_rx_gap << 8 | max_rx_latency); hmp->tx_int_var = tx_int_var >= 0 ? tx_int_var : (min_tx_pkt << 16 | max_tx_gap << 8 | max_tx_latency); /* The Hamachi-specific entries in the device structure. */ dev->netdev_ops = &hamachi_netdev_ops; dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ? 
&ethtool_ops : &ethtool_ops_no_mii; dev->watchdog_timeo = TX_TIMEOUT; if (mtu) dev->mtu = mtu; i = register_netdev(dev); if (i) { ret = i; goto err_out_unmap_rx; } printk(KERN_INFO "%s: %s type %x at %p, %pM, IRQ %d.\n", dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev), ioaddr, dev->dev_addr, irq); i = readb(ioaddr + PCIClkMeas); printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers " "%2.2x, LPA %4.4x.\n", dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32, i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers), readw(ioaddr + ANLinkPartnerAbility)); if (chip_tbl[hmp->chip_id].flags & CanHaveMII) { int phy, phy_idx = 0; for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) { int mii_status = mdio_read(dev, phy, MII_BMSR); if (mii_status != 0xffff && mii_status != 0x0000) { hmp->phys[phy_idx++] = phy; hmp->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); printk(KERN_INFO "%s: MII PHY found at address %d, status " "0x%4.4x advertising %4.4x.\n", dev->name, phy, mii_status, hmp->mii_if.advertising); } } hmp->mii_cnt = phy_idx; if (hmp->mii_cnt > 0) hmp->mii_if.phy_id = hmp->phys[0]; else memset(&hmp->mii_if, 0, sizeof(hmp->mii_if)); } /* Configure gigabit autonegotiation. */ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */ writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. 
*/ writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */ card_idx++; return 0; err_out_unmap_rx: pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring, hmp->rx_ring_dma); err_out_unmap_tx: pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring, hmp->tx_ring_dma); err_out_cleardev: free_netdev (dev); err_out_iounmap: iounmap(ioaddr); err_out_release: pci_release_regions(pdev); err_out: return ret; } static int read_eeprom(void __iomem *ioaddr, int location) { int bogus_cnt = 1000; /* We should check busy first - per docs -KDU */ while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0); writew(location, ioaddr + EEAddr); writeb(0x02, ioaddr + EECmdStatus); bogus_cnt = 1000; while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0); if (hamachi_debug > 5) printk(" EEPROM status is %2.2x after %d ticks.\n", (int)readb(ioaddr + EECmdStatus), 1000- bogus_cnt); return readb(ioaddr + EEData); } /* MII Managemen Data I/O accesses. These routines assume the MDIO controller is idle, and do not exit until the command is finished. */ static int mdio_read(struct net_device *dev, int phy_id, int location) { struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; int i; /* We should check busy first - per docs -KDU */ for (i = 10000; i >= 0; i--) if ((readw(ioaddr + MII_Status) & 1) == 0) break; writew((phy_id<<8) + location, ioaddr + MII_Addr); writew(0x0001, ioaddr + MII_Cmd); for (i = 10000; i >= 0; i--) if ((readw(ioaddr + MII_Status) & 1) == 0) break; return readw(ioaddr + MII_Rd_Data); } static void mdio_write(struct net_device *dev, int phy_id, int location, int value) { struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; int i; /* We should check busy first - per docs -KDU */ for (i = 10000; i >= 0; i--) if ((readw(ioaddr + MII_Status) & 1) == 0) break; writew((phy_id<<8) + location, ioaddr + MII_Addr); writew(value, ioaddr + MII_Wr_Data); /* Wait for the command to finish. 
*/ for (i = 10000; i >= 0; i--) if ((readw(ioaddr + MII_Status) & 1) == 0) break; } static int hamachi_open(struct net_device *dev) { struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; int i; u32 rx_int_var, tx_int_var; u16 fifo_info; i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev); if (i) return i; hamachi_init_ring(dev); #if ADDRLEN == 64 /* writellll anyone ? */ writel(hmp->rx_ring_dma, ioaddr + RxPtr); writel(hmp->rx_ring_dma >> 32, ioaddr + RxPtr + 4); writel(hmp->tx_ring_dma, ioaddr + TxPtr); writel(hmp->tx_ring_dma >> 32, ioaddr + TxPtr + 4); #else writel(hmp->rx_ring_dma, ioaddr + RxPtr); writel(hmp->tx_ring_dma, ioaddr + TxPtr); #endif /* TODO: It would make sense to organize this as words since the card * documentation does. -KDU */ for (i = 0; i < 6; i++) writeb(dev->dev_addr[i], ioaddr + StationAddr + i); /* Initialize other registers: with so many this eventually this will converted to an offset/value list. */ /* Configure the FIFO */ fifo_info = (readw(ioaddr + GPIO) & 0x00C0) >> 6; switch (fifo_info){ case 0 : /* No FIFO */ writew(0x0000, ioaddr + FIFOcfg); break; case 1 : /* Configure the FIFO for 512K external, 16K used for Tx. */ writew(0x0028, ioaddr + FIFOcfg); break; case 2 : /* Configure the FIFO for 1024 external, 32K used for Tx. */ writew(0x004C, ioaddr + FIFOcfg); break; case 3 : /* Configure the FIFO for 2048 external, 32K used for Tx. */ writew(0x006C, ioaddr + FIFOcfg); break; default : printk(KERN_WARNING "%s: Unsupported external memory config!\n", dev->name); /* Default to no FIFO */ writew(0x0000, ioaddr + FIFOcfg); break; } if (dev->if_port == 0) dev->if_port = hmp->default_port; /* Setting the Rx mode will start the Rx process. 
*/ /* If someone didn't choose a duplex, default to full-duplex */ if (hmp->duplex_lock != 1) hmp->mii_if.full_duplex = 1; /* always 1, takes no more time to do it */ writew(0x0001, ioaddr + RxChecksum); writew(0x0000, ioaddr + TxChecksum); writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */ writew(0x215F, ioaddr + MACCnfg); writew(0x000C, ioaddr + FrameGap0); /* WHAT?!?!? Why isn't this documented somewhere? -KDU */ writew(0x1018, ioaddr + FrameGap1); /* Why do we enable receives/transmits here? -KDU */ writew(0x0780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */ /* Enable automatic generation of flow control frames, period 0xffff. */ writel(0x0030FFFF, ioaddr + FlowCtrl); writew(MAX_FRAME_SIZE, ioaddr + MaxFrameSize); /* dev->mtu+14 ??? */ /* Enable legacy links. */ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */ /* Initial Link LED to blinking red. */ writeb(0x03, ioaddr + LEDCtrl); /* Configure interrupt mitigation. This has a great effect on performance, so systems tuning should start here!. */ rx_int_var = hmp->rx_int_var; tx_int_var = hmp->tx_int_var; if (hamachi_debug > 1) { printk("max_tx_latency: %d, max_tx_gap: %d, min_tx_pkt: %d\n", tx_int_var & 0x00ff, (tx_int_var & 0x00ff00) >> 8, (tx_int_var & 0x00ff0000) >> 16); printk("max_rx_latency: %d, max_rx_gap: %d, min_rx_pkt: %d\n", rx_int_var & 0x00ff, (rx_int_var & 0x00ff00) >> 8, (rx_int_var & 0x00ff0000) >> 16); printk("rx_int_var: %x, tx_int_var: %x\n", rx_int_var, tx_int_var); } writel(tx_int_var, ioaddr + TxIntrCtrl); writel(rx_int_var, ioaddr + RxIntrCtrl); set_rx_mode(dev); netif_start_queue(dev); /* Enable interrupts by setting the interrupt mask. */ writel(0x80878787, ioaddr + InterruptEnable); writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */ /* Configure and start the DMA channels. 
*/ /* Burst sizes are in the low three bits: size = 4<<(val&7) */ #if ADDRLEN == 64 writew(0x005D, ioaddr + RxDMACtrl); /* 128 dword bursts */ writew(0x005D, ioaddr + TxDMACtrl); #else writew(0x001D, ioaddr + RxDMACtrl); writew(0x001D, ioaddr + TxDMACtrl); #endif writew(0x0001, ioaddr + RxCmd); if (hamachi_debug > 2) { printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n", dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus)); } /* Set the timer to check for link beat. */ init_timer(&hmp->timer); hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */ hmp->timer.data = (unsigned long)dev; hmp->timer.function = hamachi_timer; /* timer handler */ add_timer(&hmp->timer); return 0; } static inline int hamachi_tx(struct net_device *dev) { struct hamachi_private *hmp = netdev_priv(dev); /* Update the dirty pointer until we find an entry that is still owned by the card */ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) { int entry = hmp->dirty_tx % TX_RING_SIZE; struct sk_buff *skb; if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn)) break; /* Free the original skb. 
*/ skb = hmp->tx_skbuff[entry]; if (skb) { pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); hmp->tx_skbuff[entry] = NULL; } hmp->tx_ring[entry].status_n_length = 0; if (entry >= TX_RING_SIZE-1) hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); dev->stats.tx_packets++; } return 0; } static void hamachi_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; int next_tick = 10*HZ; if (hamachi_debug > 2) { printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA " "%4.4x.\n", dev->name, readw(ioaddr + ANStatus), readw(ioaddr + ANLinkPartnerAbility)); printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x " "%4.4x %4.4x %4.4x.\n", dev->name, readw(ioaddr + 0x0e0), readw(ioaddr + 0x0e2), readw(ioaddr + 0x0e4), readw(ioaddr + 0x0e6), readw(ioaddr + 0x0e8), readw(ioaddr + 0x0eA)); } /* We could do something here... nah. */ hmp->timer.expires = RUN_AT(next_tick); add_timer(&hmp->timer); } static void hamachi_tx_timeout(struct net_device *dev) { int i; struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x," " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus)); { printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) printk(KERN_CONT " %8.8x", le32_to_cpu(hmp->rx_ring[i].status_n_length)); printk(KERN_CONT "\n"); printk(KERN_DEBUG" Tx ring %p: ", hmp->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) printk(KERN_CONT " %4.4x", le32_to_cpu(hmp->tx_ring[i].status_n_length)); printk(KERN_CONT "\n"); } /* Reinit the hardware and make sure the Rx and Tx processes are up and running. */ dev->if_port = 0; /* The right way to do Reset. 
-KDU * -Clear OWN bit in all Rx/Tx descriptors * -Wait 50 uS for channels to go idle * -Turn off MAC receiver * -Issue Reset */ for (i = 0; i < RX_RING_SIZE; i++) hmp->rx_ring[i].status_n_length &= cpu_to_le32(~DescOwn); /* Presume that all packets in the Tx queue are gone if we have to * re-init the hardware. */ for (i = 0; i < TX_RING_SIZE; i++){ struct sk_buff *skb; if (i >= TX_RING_SIZE - 1) hmp->tx_ring[i].status_n_length = cpu_to_le32(DescEndRing) | (hmp->tx_ring[i].status_n_length & cpu_to_le32(0x0000ffff)); else hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff); skb = hmp->tx_skbuff[i]; if (skb){ pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); hmp->tx_skbuff[i] = NULL; } } udelay(60); /* Sleep 60 us just for safety sake */ writew(0x0002, ioaddr + RxCmd); /* STOP Rx */ writeb(0x01, ioaddr + ChipReset); /* Reinit the hardware */ hmp->tx_full = 0; hmp->cur_rx = hmp->cur_tx = 0; hmp->dirty_rx = hmp->dirty_tx = 0; /* Rx packets are also presumed lost; however, we need to make sure a * ring of buffers is in tact. -KDU */ for (i = 0; i < RX_RING_SIZE; i++){ struct sk_buff *skb = hmp->rx_skbuff[i]; if (skb){ pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->rx_ring[i].addr), hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(skb); hmp->rx_skbuff[i] = NULL; } } /* Fill in the Rx buffers. Handle allocation failure gracefully. */ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz); hmp->rx_skbuff[i] = skb; if (skb == NULL) break; hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2)); } hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); /* Mark the last entry as wrapping the ring. 
*/ hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); /* Trigger an immediate transmit demand. */ dev->trans_start = jiffies; /* prevent tx timeout */ dev->stats.tx_errors++; /* Restart the chip's Tx/Rx processes . */ writew(0x0002, ioaddr + TxCmd); /* STOP Tx */ writew(0x0001, ioaddr + TxCmd); /* START Tx */ writew(0x0001, ioaddr + RxCmd); /* START Rx */ netif_wake_queue(dev); } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ static void hamachi_init_ring(struct net_device *dev) { struct hamachi_private *hmp = netdev_priv(dev); int i; hmp->tx_full = 0; hmp->cur_rx = hmp->cur_tx = 0; hmp->dirty_rx = hmp->dirty_tx = 0; /* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the * card needs room to do 8 byte alignment, +2 so we can reserve * the first 2 bytes, and +16 gets room for the status word from the * card. -KDU */ hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ : (((dev->mtu+26+7) & ~7) + 16)); /* Initialize all Rx descriptors. */ for (i = 0; i < RX_RING_SIZE; i++) { hmp->rx_ring[i].status_n_length = 0; hmp->rx_skbuff[i] = NULL; } /* Fill in the Rx buffers. Handle allocation failure gracefully. */ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2); hmp->rx_skbuff[i] = skb; if (skb == NULL) break; skb_reserve(skb, 2); /* 16 byte align the IP header. 
*/ hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); /* -2 because it doesn't REALLY have that first 2 bytes -KDU */ hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | DescEndPacket | DescIntr | (hmp->rx_buf_sz -2)); } hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); for (i = 0; i < TX_RING_SIZE; i++) { hmp->tx_skbuff[i] = NULL; hmp->tx_ring[i].status_n_length = 0; } /* Mark the last entry of the ring */ hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); } static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct hamachi_private *hmp = netdev_priv(dev); unsigned entry; u16 status; /* Ok, now make sure that the queue has space before trying to add another skbuff. if we return non-zero the scheduler should interpret this as a queue full and requeue the buffer for later. */ if (hmp->tx_full) { /* We should NEVER reach this point -KDU */ printk(KERN_WARNING "%s: Hamachi transmit queue full at slot %d.\n",dev->name, hmp->cur_tx); /* Wake the potentially-idle transmit channel. */ /* If we don't need to read status, DON'T -KDU */ status=readw(hmp->base + TxStatus); if( !(status & 0x0001) || (status & 0x0002)) writew(0x0001, hmp->base + TxCmd); return NETDEV_TX_BUSY; } /* Caution: the write order is important here, set the field with the "ownership" bits last. */ /* Calculate the next Tx descriptor entry. */ entry = hmp->cur_tx % TX_RING_SIZE; hmp->tx_skbuff[entry] = skb; hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE)); /* Hmmmm, could probably put a DescIntr on these, but the way the driver is currently coded makes Tx interrupts unnecessary since the clearing of the Tx ring is handled by the start_xmit routine. 
This organization helps mitigate the interrupts a bit and probably renders the max_tx_latency param useless. Update: Putting a DescIntr bit on all of the descriptors and mitigating interrupt frequency with the tx_min_pkt parameter. -KDU */ if (entry >= TX_RING_SIZE-1) /* Wrap ring */ hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn | DescEndPacket | DescEndRing | DescIntr | skb->len); else hmp->tx_ring[entry].status_n_length = cpu_to_le32(DescOwn | DescEndPacket | DescIntr | skb->len); hmp->cur_tx++; /* Non-x86 Todo: explicitly flush cache lines here. */ /* Wake the potentially-idle transmit channel. */ /* If we don't need to read status, DON'T -KDU */ status=readw(hmp->base + TxStatus); if( !(status & 0x0001) || (status & 0x0002)) writew(0x0001, hmp->base + TxCmd); /* Immediately before returning, let's clear as many entries as we can. */ hamachi_tx(dev); /* We should kick the bottom half here, since we are not accepting * interrupts with every packet. i.e. realize that Gigabit ethernet * can transmit faster than ordinary machines can load packets; * hence, any packet that got put off because we were in the transmit * routine should IMMEDIATELY get a chance to be re-queued. -KDU */ if ((hmp->cur_tx - hmp->dirty_tx) < (TX_RING_SIZE - 4)) netif_wake_queue(dev); /* Typical path */ else { hmp->tx_full = 1; netif_stop_queue(dev); } if (hamachi_debug > 4) { printk(KERN_DEBUG "%s: Hamachi transmit frame #%d queued in slot %d.\n", dev->name, hmp->cur_tx, entry); } return NETDEV_TX_OK; } /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ static irqreturn_t hamachi_interrupt(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; long boguscnt = max_interrupt_work; int handled = 0; #ifndef final_version /* Can never occur. 
*/ if (dev == NULL) { printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq); return IRQ_NONE; } #endif spin_lock(&hmp->lock); do { u32 intr_status = readl(ioaddr + InterruptClear); if (hamachi_debug > 4) printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n", dev->name, intr_status); if (intr_status == 0) break; handled = 1; if (intr_status & IntrRxDone) hamachi_rx(dev); if (intr_status & IntrTxDone){ /* This code should RARELY need to execute. After all, this is * a gigabit link, it should consume packets as fast as we put * them in AND we clear the Tx ring in hamachi_start_xmit(). */ if (hmp->tx_full){ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++){ int entry = hmp->dirty_tx % TX_RING_SIZE; struct sk_buff *skb; if (hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn)) break; skb = hmp->tx_skbuff[entry]; /* Free the original skb. */ if (skb){ pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); hmp->tx_skbuff[entry] = NULL; } hmp->tx_ring[entry].status_n_length = 0; if (entry >= TX_RING_SIZE-1) hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); dev->stats.tx_packets++; } if (hmp->cur_tx - hmp->dirty_tx < TX_RING_SIZE - 4){ /* The ring is no longer full */ hmp->tx_full = 0; netif_wake_queue(dev); } } else { netif_wake_queue(dev); } } /* Abnormal error summary/uncommon events handlers. */ if (intr_status & (IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr | LinkChange | NegotiationChange | StatsMax)) hamachi_error(dev, intr_status); if (--boguscnt < 0) { printk(KERN_WARNING "%s: Too much work at interrupt, status=0x%4.4x.\n", dev->name, intr_status); break; } } while (1); if (hamachi_debug > 3) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", dev->name, readl(ioaddr + IntrStatus)); #ifndef final_version /* Code that should never be run! Perhaps remove after testing.. 
*/ { static int stopit = 10; if (dev->start == 0 && --stopit < 0) { printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n", dev->name); free_irq(irq, dev); } } #endif spin_unlock(&hmp->lock); return IRQ_RETVAL(handled); } /* This routine is logically part of the interrupt handler, but separated for clarity and better register allocation. */ static int hamachi_rx(struct net_device *dev) { struct hamachi_private *hmp = netdev_priv(dev); int entry = hmp->cur_rx % RX_RING_SIZE; int boguscnt = (hmp->dirty_rx + RX_RING_SIZE) - hmp->cur_rx; if (hamachi_debug > 4) { printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n", entry, hmp->rx_ring[entry].status_n_length); } /* If EOP is set on the next entry, it's a new packet. Send it up. */ while (1) { struct hamachi_desc *desc = &(hmp->rx_ring[entry]); u32 desc_status = le32_to_cpu(desc->status_n_length); u16 data_size = desc_status; /* Implicit truncate */ u8 *buf_addr; s32 frame_status; if (desc_status & DescOwn) break; pci_dma_sync_single_for_cpu(hmp->pci_dev, leXX_to_cpu(desc->addr), hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); buf_addr = (u8 *) hmp->rx_skbuff[entry]->data; frame_status = get_unaligned_le32(&(buf_addr[data_size - 12])); if (hamachi_debug > 4) printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n", frame_status); if (--boguscnt < 0) break; if ( ! 
(desc_status & DescEndPacket)) { printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " "multiple buffers, entry %#x length %d status %4.4x!\n", dev->name, hmp->cur_rx, data_size, desc_status); printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n", dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]); printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n", dev->name, le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0xffff0000, le32_to_cpu(hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length) & 0x0000ffff, le32_to_cpu(hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length)); dev->stats.rx_length_errors++; } /* else Omit for prototype errata??? */ if (frame_status & 0x00380000) { /* There was an error. */ if (hamachi_debug > 2) printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n", frame_status); dev->stats.rx_errors++; if (frame_status & 0x00600000) dev->stats.rx_length_errors++; if (frame_status & 0x00080000) dev->stats.rx_frame_errors++; if (frame_status & 0x00100000) dev->stats.rx_crc_errors++; if (frame_status < 0) dev->stats.rx_dropped++; } else { struct sk_buff *skb; /* Omit CRC */ u16 pkt_len = (frame_status & 0x07ff) - 4; #ifdef RX_CHECKSUM u32 pfck = *(u32 *) &buf_addr[data_size - 8]; #endif #ifndef final_version if (hamachi_debug > 4) printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d" " of %d, bogus_cnt %d.\n", pkt_len, data_size, boguscnt); if (hamachi_debug > 5) printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n", dev->name, *(s32*)&(buf_addr[data_size - 20]), *(s32*)&(buf_addr[data_size - 16]), *(s32*)&(buf_addr[data_size - 12]), *(s32*)&(buf_addr[data_size - 8]), *(s32*)&(buf_addr[data_size - 4])); #endif /* Check if the packet is long enough to accept without copying to a minimally-sized skbuff. 
*/ if (pkt_len < rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { #ifdef RX_CHECKSUM printk(KERN_ERR "%s: rx_copybreak non-zero " "not good with RX_CHECKSUM\n", dev->name); #endif skb_reserve(skb, 2); /* 16 byte align the IP header */ pci_dma_sync_single_for_cpu(hmp->pci_dev, leXX_to_cpu(hmp->rx_ring[entry].addr), hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); /* Call copy + cksum if available. */ #if 1 || USE_IP_COPYSUM skb_copy_to_linear_data(skb, hmp->rx_skbuff[entry]->data, pkt_len); skb_put(skb, pkt_len); #else memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma + entry*sizeof(*desc), pkt_len); #endif pci_dma_sync_single_for_device(hmp->pci_dev, leXX_to_cpu(hmp->rx_ring[entry].addr), hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); } else { pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->rx_ring[entry].addr), hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_put(skb = hmp->rx_skbuff[entry], pkt_len); hmp->rx_skbuff[entry] = NULL; } skb->protocol = eth_type_trans(skb, dev); #ifdef RX_CHECKSUM /* TCP or UDP on ipv4, DIX encoding */ if (pfck>>24 == 0x91 || pfck>>24 == 0x51) { struct iphdr *ih = (struct iphdr *) skb->data; /* Check that IP packet is at least 46 bytes, otherwise, * there may be pad bytes included in the hardware checksum. * This wouldn't happen if everyone padded with 0. 
*/ if (ntohs(ih->tot_len) >= 46){ /* don't worry about frags */ if (!(ih->frag_off & cpu_to_be16(IP_MF|IP_OFFSET))) { u32 inv = *(u32 *) &buf_addr[data_size - 16]; u32 *p = (u32 *) &buf_addr[data_size - 20]; register u32 crc, p_r, p_r1; if (inv & 4) { inv &= ~4; --p; } p_r = *p; p_r1 = *(p-1); switch (inv) { case 0: crc = (p_r & 0xffff) + (p_r >> 16); break; case 1: crc = (p_r >> 16) + (p_r & 0xffff) + (p_r1 >> 16 & 0xff00); break; case 2: crc = p_r + (p_r1 >> 16); break; case 3: crc = p_r + (p_r1 & 0xff00) + (p_r1 >> 16); break; default: /*NOTREACHED*/ crc = 0; } if (crc & 0xffff0000) { crc &= 0xffff; ++crc; } /* tcp/udp will add in pseudo */ skb->csum = ntohs(pfck & 0xffff); if (skb->csum > crc) skb->csum -= crc; else skb->csum += (~crc & 0xffff); /* * could do the pseudo myself and return * CHECKSUM_UNNECESSARY */ skb->ip_summed = CHECKSUM_COMPLETE; } } } #endif /* RX_CHECKSUM */ netif_rx(skb); dev->stats.rx_packets++; } entry = (++hmp->cur_rx) % RX_RING_SIZE; } /* Refill the Rx ring buffers. */ for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) { struct hamachi_desc *desc; entry = hmp->dirty_rx % RX_RING_SIZE; desc = &(hmp->rx_ring[entry]); if (hmp->rx_skbuff[entry] == NULL) { struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz + 2); hmp->rx_skbuff[entry] = skb; if (skb == NULL) break; /* Better luck next round. */ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); } desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz); if (entry >= RX_RING_SIZE-1) desc->status_n_length |= cpu_to_le32(DescOwn | DescEndPacket | DescEndRing | DescIntr); else desc->status_n_length |= cpu_to_le32(DescOwn | DescEndPacket | DescIntr); } /* Restart Rx engine if stopped. */ /* If we don't need to check status, don't. 
-KDU */ if (readw(hmp->base + RxStatus) & 0x0002) writew(0x0001, hmp->base + RxCmd); return 0; } /* This is more properly named "uncommon interrupt events", as it covers more than just errors. */ static void hamachi_error(struct net_device *dev, int intr_status) { struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; if (intr_status & (LinkChange|NegotiationChange)) { if (hamachi_debug > 1) printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl" " %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n", dev->name, readw(ioaddr + 0x0E0), readw(ioaddr + 0x0E2), readw(ioaddr + ANLinkPartnerAbility), readl(ioaddr + IntrStatus)); if (readw(ioaddr + ANStatus) & 0x20) writeb(0x01, ioaddr + LEDCtrl); else writeb(0x03, ioaddr + LEDCtrl); } if (intr_status & StatsMax) { hamachi_get_stats(dev); /* Read the overflow bits to clear. */ readl(ioaddr + 0x370); readl(ioaddr + 0x3F0); } if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange|IntrRxDone|IntrTxDone)) && hamachi_debug) printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", dev->name, intr_status); /* Hmmmmm, it's not clear how to recover from PCI faults. */ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) dev->stats.tx_fifo_errors++; if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) dev->stats.rx_fifo_errors++; } static int hamachi_close(struct net_device *dev) { struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; struct sk_buff *skb; int i; netif_stop_queue(dev); if (hamachi_debug > 1) { printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n", dev->name, readw(ioaddr + TxStatus), readw(ioaddr + RxStatus), readl(ioaddr + IntrStatus)); printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx, hmp->dirty_rx); } /* Disable interrupts by clearing the interrupt mask. */ writel(0x0000, ioaddr + InterruptEnable); /* Stop the chip's Tx and Rx processes. 
*/ writel(2, ioaddr + RxCmd); writew(2, ioaddr + TxCmd); #ifdef __i386__ if (hamachi_debug > 2) { printk(KERN_DEBUG " Tx ring at %8.8x:\n", (int)hmp->tx_ring_dma); for (i = 0; i < TX_RING_SIZE; i++) printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x.\n", readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ', i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr); printk(KERN_DEBUG " Rx ring %8.8x:\n", (int)hmp->rx_ring_dma); for (i = 0; i < RX_RING_SIZE; i++) { printk(KERN_DEBUG " %c #%d desc. %4.4x %8.8x\n", readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ', i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr); if (hamachi_debug > 6) { if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) { u16 *addr = (u16 *) hmp->rx_skbuff[i]->data; int j; printk(KERN_DEBUG "Addr: "); for (j = 0; j < 0x50; j++) printk(" %4.4x", addr[j]); printk("\n"); } } } } #endif /* __i386__ debugging only */ free_irq(hmp->pci_dev->irq, dev); del_timer_sync(&hmp->timer); /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { skb = hmp->rx_skbuff[i]; hmp->rx_ring[i].status_n_length = 0; if (skb) { pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->rx_ring[i].addr), hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(skb); hmp->rx_skbuff[i] = NULL; } hmp->rx_ring[i].addr = cpu_to_leXX(0xBADF00D0); /* An invalid address. */ } for (i = 0; i < TX_RING_SIZE; i++) { skb = hmp->tx_skbuff[i]; if (skb) { pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); dev_kfree_skb(skb); hmp->tx_skbuff[i] = NULL; } } writeb(0x00, ioaddr + LEDCtrl); return 0; } static struct net_device_stats *hamachi_get_stats(struct net_device *dev) { struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; /* We should lock this segment of code for SMP eventually, although the vulnerability window is very small and statistics are non-critical. */ /* Ok, what goes here? This appears to be stuck at 21 packets according to ifconfig. 
It does get incremented in hamachi_tx(), so I think I'll comment it out here and see if better things happen. */ /* dev->stats.tx_packets = readl(ioaddr + 0x000); */ /* Total Uni+Brd+Multi */ dev->stats.rx_bytes = readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */ dev->stats.tx_bytes = readl(ioaddr + 0x3B0); /* Multicast Rx */ dev->stats.multicast = readl(ioaddr + 0x320); /* Over+Undersized */ dev->stats.rx_length_errors = readl(ioaddr + 0x368); /* Jabber */ dev->stats.rx_over_errors = readl(ioaddr + 0x35C); /* Jabber */ dev->stats.rx_crc_errors = readl(ioaddr + 0x360); /* Symbol Errs */ dev->stats.rx_frame_errors = readl(ioaddr + 0x364); /* Dropped */ dev->stats.rx_missed_errors = readl(ioaddr + 0x36C); return &dev->stats; } static void set_rx_mode(struct net_device *dev) { struct hamachi_private *hmp = netdev_priv(dev); void __iomem *ioaddr = hmp->base; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ writew(0x000F, ioaddr + AddrMode); } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) { /* Too many to match, or accept all multicasts. */ writew(0x000B, ioaddr + AddrMode); } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */ struct netdev_hw_addr *ha; int i = 0; netdev_for_each_mc_addr(ha, dev) { writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8); writel(0x20000 | (*(u16 *)&ha->addr[4]), ioaddr + 0x104 + i*8); i++; } /* Clear remaining entries. */ for (; i < 64; i++) writel(0, ioaddr + 0x104 + i*8); writew(0x0003, ioaddr + AddrMode); } else { /* Normal, unicast/broadcast-only mode. 
*/ writew(0x0001, ioaddr + AddrMode); } } static int check_if_running(struct net_device *dev) { if (!netif_running(dev)) return -EINVAL; return 0; } static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct hamachi_private *np = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct hamachi_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); mii_ethtool_gset(&np->mii_if, ecmd); spin_unlock_irq(&np->lock); return 0; } static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct hamachi_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); res = mii_ethtool_sset(&np->mii_if, ecmd); spin_unlock_irq(&np->lock); return res; } static int hamachi_nway_reset(struct net_device *dev) { struct hamachi_private *np = netdev_priv(dev); return mii_nway_restart(&np->mii_if); } static u32 hamachi_get_link(struct net_device *dev) { struct hamachi_private *np = netdev_priv(dev); return mii_link_ok(&np->mii_if); } static const struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = hamachi_get_drvinfo, .get_settings = hamachi_get_settings, .set_settings = hamachi_set_settings, .nway_reset = hamachi_nway_reset, .get_link = hamachi_get_link, }; static const struct ethtool_ops ethtool_ops_no_mii = { .begin = check_if_running, .get_drvinfo = hamachi_get_drvinfo, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct hamachi_private *np = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(rq); int rc; if (!netif_running(dev)) return -EINVAL; if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */ u32 *d = (u32 *)&rq->ifr_ifru; /* Should add this check here or an ordinary user can do nasty * things. 
-KDU * * TODO: Shut down the Rx and Tx engines while doing this. */ if (!capable(CAP_NET_ADMIN)) return -EPERM; writel(d[0], np->base + TxIntrCtrl); writel(d[1], np->base + RxIntrCtrl); printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name, (u32) readl(np->base + TxIntrCtrl), (u32) readl(np->base + RxIntrCtrl)); rc = 0; } else { spin_lock_irq(&np->lock); rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); spin_unlock_irq(&np->lock); } return rc; } static void hamachi_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct hamachi_private *hmp = netdev_priv(dev); pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring, hmp->rx_ring_dma); pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring, hmp->tx_ring_dma); unregister_netdev(dev); iounmap(hmp->base); free_netdev(dev); pci_release_regions(pdev); } } static const struct pci_device_id hamachi_pci_tbl[] = { { 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; MODULE_DEVICE_TABLE(pci, hamachi_pci_tbl); static struct pci_driver hamachi_driver = { .name = DRV_NAME, .id_table = hamachi_pci_tbl, .probe = hamachi_init_one, .remove = hamachi_remove_one, }; static int __init hamachi_init (void) { /* when a module, this is printed whether or not devices are found in probe */ #ifdef MODULE printk(version); #endif return pci_register_driver(&hamachi_driver); } static void __exit hamachi_exit (void) { pci_unregister_driver(&hamachi_driver); } module_init(hamachi_init); module_exit(hamachi_exit);
gpl-2.0
Myself5/android_kernel_sony_msm
drivers/misc/carma/carma-fpga.c
2150
37391
/* * CARMA DATA-FPGA Access Driver * * Copyright (c) 2009-2011 Ira W. Snyder <iws@ovro.caltech.edu> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ /* * FPGA Memory Dump Format * * FPGA #0 control registers (32 x 32-bit words) * FPGA #1 control registers (32 x 32-bit words) * FPGA #2 control registers (32 x 32-bit words) * FPGA #3 control registers (32 x 32-bit words) * SYSFPGA control registers (32 x 32-bit words) * FPGA #0 correlation array (NUM_CORL0 correlation blocks) * FPGA #1 correlation array (NUM_CORL1 correlation blocks) * FPGA #2 correlation array (NUM_CORL2 correlation blocks) * FPGA #3 correlation array (NUM_CORL3 correlation blocks) * * Each correlation array consists of: * * Correlation Data (2 x NUM_LAGSn x 32-bit words) * Pipeline Metadata (2 x NUM_METAn x 32-bit words) * Quantization Counters (2 x NUM_QCNTn x 32-bit words) * * The NUM_CORLn, NUM_LAGSn, NUM_METAn, and NUM_QCNTn values come from * the FPGA configuration registers. They do not change once the FPGA's * have been programmed, they only change on re-programming. */ /* * Basic Description: * * This driver is used to capture correlation spectra off of the four data * processing FPGAs. The FPGAs are often reprogrammed at runtime, therefore * this driver supports dynamic enable/disable of capture while the device * remains open. * * The nominal capture rate is 64Hz (every 15.625ms). To facilitate this fast * capture rate, all buffers are pre-allocated to avoid any potentially long * running memory allocations while capturing. * * There are two lists and one pointer which are used to keep track of the * different states of data buffers. * * 1) free list * This list holds all empty data buffers which are ready to receive data. 
* * 2) inflight pointer * This pointer holds the currently inflight data buffer. This buffer is having * data copied into it by the DMA engine. * * 3) used list * This list holds data buffers which have been filled, and are waiting to be * read by userspace. * * All buffers start life on the free list, then move successively to the * inflight pointer, and then to the used list. After they have been read by * userspace, they are moved back to the free list. The cycle repeats as long * as necessary. * * It should be noted that all buffers are mapped and ready for DMA when they * are on any of the three lists. They are only unmapped when they are in the * process of being read by userspace. */ /* * Notes on the IRQ masking scheme: * * The IRQ masking scheme here is different than most other hardware. The only * way for the DATA-FPGAs to detect if the kernel has taken too long to copy * the data is if the status registers are not cleared before the next * correlation data dump is ready. * * The interrupt line is connected to the status registers, such that when they * are cleared, the interrupt is de-asserted. Therein lies our problem. We need * to schedule a long-running DMA operation and return from the interrupt * handler quickly, but we cannot clear the status registers. * * To handle this, the system controller FPGA has the capability to connect the * interrupt line to a user-controlled GPIO pin. This pin is driven high * (unasserted) and left that way. To mask the interrupt, we change the * interrupt source to the GPIO pin. Tada, we hid the interrupt. 
:) */ #include <linux/of_platform.h> #include <linux/dma-mapping.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/dmaengine.h> #include <linux/seq_file.h> #include <linux/highmem.h> #include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/kref.h> #include <linux/io.h> #include <media/videobuf-dma-sg.h> /* system controller registers */ #define SYS_IRQ_SOURCE_CTL 0x24 #define SYS_IRQ_OUTPUT_EN 0x28 #define SYS_IRQ_OUTPUT_DATA 0x2C #define SYS_IRQ_INPUT_DATA 0x30 #define SYS_FPGA_CONFIG_STATUS 0x44 /* GPIO IRQ line assignment */ #define IRQ_CORL_DONE 0x10 /* FPGA registers */ #define MMAP_REG_VERSION 0x00 #define MMAP_REG_CORL_CONF1 0x08 #define MMAP_REG_CORL_CONF2 0x0C #define MMAP_REG_STATUS 0x48 #define SYS_FPGA_BLOCK 0xF0000000 #define DATA_FPGA_START 0x400000 #define DATA_FPGA_SIZE 0x80000 static const char drv_name[] = "carma-fpga"; #define NUM_FPGA 4 #define MIN_DATA_BUFS 8 #define MAX_DATA_BUFS 64 struct fpga_info { unsigned int num_lag_ram; unsigned int blk_size; }; struct data_buf { struct list_head entry; struct videobuf_dmabuf vb; size_t size; }; struct fpga_device { /* character device */ struct miscdevice miscdev; struct device *dev; struct mutex mutex; /* reference count */ struct kref ref; /* FPGA registers and information */ struct fpga_info info[NUM_FPGA]; void __iomem *regs; int irq; /* FPGA Physical Address/Size Information */ resource_size_t phys_addr; size_t phys_size; /* DMA structures */ struct sg_table corl_table; unsigned int corl_nents; struct dma_chan *chan; /* Protection for all members below */ spinlock_t lock; /* Device enable/disable flag */ bool enabled; /* Correlation data buffers */ wait_queue_head_t wait; struct list_head free; struct list_head used; struct data_buf *inflight; /* Information about data buffers */ unsigned int num_dropped; unsigned int num_buffers; size_t bufsize; struct dentry 
*dbg_entry; }; struct fpga_reader { struct fpga_device *priv; struct data_buf *buf; off_t buf_start; }; static void fpga_device_release(struct kref *ref) { struct fpga_device *priv = container_of(ref, struct fpga_device, ref); /* the last reader has exited, cleanup the last bits */ mutex_destroy(&priv->mutex); kfree(priv); } /* * Data Buffer Allocation Helpers */ /** * data_free_buffer() - free a single data buffer and all allocated memory * @buf: the buffer to free * * This will free all of the pages allocated to the given data buffer, and * then free the structure itself */ static void data_free_buffer(struct data_buf *buf) { /* It is ok to free a NULL buffer */ if (!buf) return; /* free all memory */ videobuf_dma_free(&buf->vb); kfree(buf); } /** * data_alloc_buffer() - allocate and fill a data buffer with pages * @bytes: the number of bytes required * * This allocates all space needed for a data buffer. It must be mapped before * use in a DMA transaction using videobuf_dma_map(). * * Returns NULL on failure */ static struct data_buf *data_alloc_buffer(const size_t bytes) { unsigned int nr_pages; struct data_buf *buf; int ret; /* calculate the number of pages necessary */ nr_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); /* allocate the buffer structure */ buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (!buf) goto out_return; /* initialize internal fields */ INIT_LIST_HEAD(&buf->entry); buf->size = bytes; /* allocate the videobuf */ videobuf_dma_init(&buf->vb); ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages); if (ret) goto out_free_buf; return buf; out_free_buf: kfree(buf); out_return: return NULL; } /** * data_free_buffers() - free all allocated buffers * @priv: the driver's private data structure * * Free all buffers allocated by the driver (except those currently in the * process of being read by userspace). 
* * LOCKING: must hold dev->mutex * CONTEXT: user */ static void data_free_buffers(struct fpga_device *priv) { struct data_buf *buf, *tmp; /* the device should be stopped, no DMA in progress */ BUG_ON(priv->inflight != NULL); list_for_each_entry_safe(buf, tmp, &priv->free, entry) { list_del_init(&buf->entry); videobuf_dma_unmap(priv->dev, &buf->vb); data_free_buffer(buf); } list_for_each_entry_safe(buf, tmp, &priv->used, entry) { list_del_init(&buf->entry); videobuf_dma_unmap(priv->dev, &buf->vb); data_free_buffer(buf); } priv->num_buffers = 0; priv->bufsize = 0; } /** * data_alloc_buffers() - allocate 1 seconds worth of data buffers * @priv: the driver's private data structure * * Allocate enough buffers for a whole second worth of data * * This routine will attempt to degrade nicely by succeeding even if a full * second worth of data buffers could not be allocated, as long as a minimum * number were allocated. In this case, it will print a message to the kernel * log. * * The device must not be modifying any lists when this is called. 
* * CONTEXT: user * LOCKING: must hold dev->mutex * * Returns 0 on success, -ERRNO otherwise */ static int data_alloc_buffers(struct fpga_device *priv) { struct data_buf *buf; int i, ret; for (i = 0; i < MAX_DATA_BUFS; i++) { /* allocate a buffer */ buf = data_alloc_buffer(priv->bufsize); if (!buf) break; /* map it for DMA */ ret = videobuf_dma_map(priv->dev, &buf->vb); if (ret) { data_free_buffer(buf); break; } /* add it to the list of free buffers */ list_add_tail(&buf->entry, &priv->free); priv->num_buffers++; } /* Make sure we allocated the minimum required number of buffers */ if (priv->num_buffers < MIN_DATA_BUFS) { dev_err(priv->dev, "Unable to allocate enough data buffers\n"); data_free_buffers(priv); return -ENOMEM; } /* Warn if we are running in a degraded state, but do not fail */ if (priv->num_buffers < MAX_DATA_BUFS) { dev_warn(priv->dev, "Unable to allocate %d buffers, using %d buffers instead\n", MAX_DATA_BUFS, i); } return 0; } /* * DMA Operations Helpers */ /** * fpga_start_addr() - get the physical address a DATA-FPGA * @priv: the driver's private data structure * @fpga: the DATA-FPGA number (zero based) */ static dma_addr_t fpga_start_addr(struct fpga_device *priv, unsigned int fpga) { return priv->phys_addr + 0x400000 + (0x80000 * fpga); } /** * fpga_block_addr() - get the physical address of a correlation data block * @priv: the driver's private data structure * @fpga: the DATA-FPGA number (zero based) * @blknum: the correlation block number (zero based) */ static dma_addr_t fpga_block_addr(struct fpga_device *priv, unsigned int fpga, unsigned int blknum) { return fpga_start_addr(priv, fpga) + (0x10000 * (1 + blknum)); } #define REG_BLOCK_SIZE (32 * 4) /** * data_setup_corl_table() - create the scatterlist for correlation dumps * @priv: the driver's private data structure * * Create the scatterlist for transferring a correlation dump from the * DATA FPGAs. 
 * This structure will be reused for each buffer that needs
 * to be filled with correlation data.
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_setup_corl_table(struct fpga_device *priv)
{
	struct sg_table *table = &priv->corl_table;
	struct scatterlist *sg;
	struct fpga_info *info;
	int i, j, ret;

	/*
	 * Calculate the number of entries needed.
	 *
	 * NOTE(review): this scales the (1 + NUM_FPGA) register areas by
	 * REG_BLOCK_SIZE (a byte count), while the loops below fill exactly
	 * (1 + NUM_FPGA) register entries plus one entry per lag RAM.
	 * It looks like corl_nents over-counts sg entries — confirm against
	 * how device_prep_dma_sg() consumes src_nents.
	 */
	priv->corl_nents = (1 + NUM_FPGA) * REG_BLOCK_SIZE;
	for (i = 0; i < NUM_FPGA; i++)
		priv->corl_nents += priv->info[i].num_lag_ram;

	/* Allocate the scatterlist table */
	ret = sg_alloc_table(table, priv->corl_nents, GFP_KERNEL);
	if (ret) {
		dev_err(priv->dev, "unable to allocate DMA table\n");
		return ret;
	}

	/* Add the DATA FPGA registers to the scatterlist */
	sg = table->sgl;
	for (i = 0; i < NUM_FPGA; i++) {
		sg_dma_address(sg) = fpga_start_addr(priv, i);
		sg_dma_len(sg) = REG_BLOCK_SIZE;
		sg = sg_next(sg);
	}

	/* Add the SYS-FPGA registers to the scatterlist */
	sg_dma_address(sg) = SYS_FPGA_BLOCK;
	sg_dma_len(sg) = REG_BLOCK_SIZE;
	sg = sg_next(sg);

	/* Add the FPGA correlation data blocks to the scatterlist */
	for (i = 0; i < NUM_FPGA; i++) {
		info = &priv->info[i];
		for (j = 0; j < info->num_lag_ram; j++) {
			sg_dma_address(sg) = fpga_block_addr(priv, i, j);
			sg_dma_len(sg) = info->blk_size;
			sg = sg_next(sg);
		}
	}

	/*
	 * All physical addresses and lengths are present in the structure
	 * now. It can be reused for every FPGA DATA interrupt
	 */
	return 0;
}

/*
 * FPGA Register Access Helpers
 */

/* write a 32-bit big-endian value to one DATA-FPGA's register window */
static void fpga_write_reg(struct fpga_device *priv, unsigned int fpga,
			   unsigned int reg, u32 val)
{
	const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
	iowrite32be(val, priv->regs + fpga_start + reg);
}

/* read a 32-bit big-endian value from one DATA-FPGA's register window */
static u32 fpga_read_reg(struct fpga_device *priv, unsigned int fpga,
			 unsigned int reg)
{
	const int fpga_start = DATA_FPGA_START + (fpga * DATA_FPGA_SIZE);
	return ioread32be(priv->regs + fpga_start + reg);
}

/**
 * data_calculate_bufsize() - calculate the data buffer size required
 * @priv: the driver's private data structure
 *
 * Calculate the total buffer size needed to hold a single block
 * of correlation data
 *
 * CONTEXT: user
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_calculate_bufsize(struct fpga_device *priv)
{
	u32 num_corl, num_lags, num_meta, num_qcnt, num_pack;
	u32 conf1, conf2, version;
	u32 num_lag_ram, blk_size;
	int i;

	/* Each buffer starts with the 5 FPGA register areas */
	priv->bufsize = (1 + NUM_FPGA) * REG_BLOCK_SIZE;

	/* Read and store the configuration data for each FPGA */
	for (i = 0; i < NUM_FPGA; i++) {
		version = fpga_read_reg(priv, i, MMAP_REG_VERSION);
		conf1 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF1);
		conf2 = fpga_read_reg(priv, i, MMAP_REG_CORL_CONF2);

		/*
		 * minor version 2 and later: the CONF1 field layout changed,
		 * adding an explicit NUM_PACK field and shifting the others
		 */
		if ((version & 0x000000FF) >= 2) {
			num_corl = (conf1 & 0x000000F0) >> 4;
			num_pack = (conf1 & 0x00000F00) >> 8;
			num_lags = (conf1 & 0x00FFF000) >> 12;
			num_meta = (conf1 & 0x7F000000) >> 24;
			num_qcnt = (conf2 & 0x00000FFF) >> 0;
		} else {
			num_corl = (conf1 & 0x000000F0) >> 4;
			num_pack = 1; /* implied */
			num_lags = (conf1 & 0x000FFF00) >> 8;
			num_meta = (conf1 & 0x7FF00000) >> 20;
			num_qcnt = (conf2 & 0x00000FFF) >> 0;
		}

		/* correlators are packed num_pack per lag RAM (round up) */
		num_lag_ram = (num_corl + num_pack - 1) / num_pack;
		blk_size = ((num_pack * num_lags) + num_meta + num_qcnt) * 8;

		priv->info[i].num_lag_ram = num_lag_ram;
		priv->info[i].blk_size = blk_size;
		priv->bufsize += num_lag_ram * blk_size;

		dev_dbg(priv->dev, "FPGA %d NUM_CORL: %d\n", i, num_corl);
		dev_dbg(priv->dev, "FPGA %d NUM_PACK: %d\n", i, num_pack);
		dev_dbg(priv->dev, "FPGA %d NUM_LAGS: %d\n", i, num_lags);
		dev_dbg(priv->dev, "FPGA %d NUM_META: %d\n", i, num_meta);
		dev_dbg(priv->dev, "FPGA %d NUM_QCNT: %d\n", i, num_qcnt);
		dev_dbg(priv->dev, "FPGA %d BLK_SIZE: %d\n", i, blk_size);
	}

	dev_dbg(priv->dev, "TOTAL BUFFER SIZE: %zu bytes\n", priv->bufsize);
	return 0;
}

/*
 * Interrupt Handling
 */

/**
 * data_disable_interrupts() - stop the device from generating interrupts
 * @priv: the driver's private data structure
 *
 * Hide interrupts by switching to GPIO interrupt source
 *
 * LOCKING: must hold dev->lock
 */
static void data_disable_interrupts(struct fpga_device *priv)
{
	/* hide the interrupt by switching the IRQ driver to GPIO */
	iowrite32be(0x2F, priv->regs + SYS_IRQ_SOURCE_CTL);
}

/**
 * data_enable_interrupts() - allow the device to generate interrupts
 * @priv: the driver's private data structure
 *
 * Unhide interrupts by switching to the FPGA interrupt source. At the
 * same time, clear the DATA-FPGA status registers.
* * LOCKING: must hold dev->lock */ static void data_enable_interrupts(struct fpga_device *priv) { /* clear the actual FPGA corl_done interrupt */ fpga_write_reg(priv, 0, MMAP_REG_STATUS, 0x0); fpga_write_reg(priv, 1, MMAP_REG_STATUS, 0x0); fpga_write_reg(priv, 2, MMAP_REG_STATUS, 0x0); fpga_write_reg(priv, 3, MMAP_REG_STATUS, 0x0); /* flush the writes */ fpga_read_reg(priv, 0, MMAP_REG_STATUS); fpga_read_reg(priv, 1, MMAP_REG_STATUS); fpga_read_reg(priv, 2, MMAP_REG_STATUS); fpga_read_reg(priv, 3, MMAP_REG_STATUS); /* switch back to the external interrupt source */ iowrite32be(0x3F, priv->regs + SYS_IRQ_SOURCE_CTL); } /** * data_dma_cb() - DMAEngine callback for DMA completion * @data: the driver's private data structure * * Complete a DMA transfer from the DATA-FPGA's * * This is called via the DMA callback mechanism, and will handle moving the * completed DMA transaction to the used list, and then wake any processes * waiting for new data * * CONTEXT: any, softirq expected */ static void data_dma_cb(void *data) { struct fpga_device *priv = data; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); /* If there is no inflight buffer, we've got a bug */ BUG_ON(priv->inflight == NULL); /* Move the inflight buffer onto the used list */ list_move_tail(&priv->inflight->entry, &priv->used); priv->inflight = NULL; /* * If data dumping is still enabled, then clear the FPGA * status registers and re-enable FPGA interrupts */ if (priv->enabled) data_enable_interrupts(priv); spin_unlock_irqrestore(&priv->lock, flags); /* * We've changed both the inflight and used lists, so we need * to wake up any processes that are blocking for those events */ wake_up(&priv->wait); } /** * data_submit_dma() - prepare and submit the required DMA to fill a buffer * @priv: the driver's private data structure * @buf: the data buffer * * Prepare and submit the necessary DMA transactions to fill a correlation * data buffer. 
* * LOCKING: must hold dev->lock * CONTEXT: hardirq only * * Returns 0 on success, -ERRNO otherwise */ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf) { struct scatterlist *dst_sg, *src_sg; unsigned int dst_nents, src_nents; struct dma_chan *chan = priv->chan; struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; dma_addr_t dst, src; unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP; dst_sg = buf->vb.sglist; dst_nents = buf->vb.sglen; src_sg = priv->corl_table.sgl; src_nents = priv->corl_nents; /* * All buffers passed to this function should be ready and mapped * for DMA already. Therefore, we don't need to do anything except * submit it to the Freescale DMA Engine for processing */ /* setup the scatterlist to scatterlist transfer */ tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents, src_sg, src_nents, 0); if (!tx) { dev_err(priv->dev, "unable to prep scatterlist DMA\n"); return -ENOMEM; } /* submit the transaction to the DMA controller */ cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { dev_err(priv->dev, "unable to submit scatterlist DMA\n"); return -ENOMEM; } /* Prepare the re-read of the SYS-FPGA block */ dst = sg_dma_address(dst_sg) + (NUM_FPGA * REG_BLOCK_SIZE); src = SYS_FPGA_BLOCK; tx = chan->device->device_prep_dma_memcpy(chan, dst, src, REG_BLOCK_SIZE, dma_flags); if (!tx) { dev_err(priv->dev, "unable to prep SYS-FPGA DMA\n"); return -ENOMEM; } /* Setup the callback */ tx->callback = data_dma_cb; tx->callback_param = priv; /* submit the transaction to the DMA controller */ cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { dev_err(priv->dev, "unable to submit SYS-FPGA DMA\n"); return -ENOMEM; } return 0; } #define CORL_DONE 0x1 #define CORL_ERR 0x2 static irqreturn_t data_irq(int irq, void *dev_id) { struct fpga_device *priv = dev_id; bool submitted = false; struct data_buf *buf; u32 status; int i; /* detect spurious interrupts via FPGA status */ for (i = 0; i < 4; 
i++) { status = fpga_read_reg(priv, i, MMAP_REG_STATUS); if (!(status & (CORL_DONE | CORL_ERR))) { dev_err(priv->dev, "spurious irq detected (FPGA)\n"); return IRQ_NONE; } } /* detect spurious interrupts via raw IRQ pin readback */ status = ioread32be(priv->regs + SYS_IRQ_INPUT_DATA); if (status & IRQ_CORL_DONE) { dev_err(priv->dev, "spurious irq detected (IRQ)\n"); return IRQ_NONE; } spin_lock(&priv->lock); /* * This is an error case that should never happen. * * If this driver has a bug and manages to re-enable interrupts while * a DMA is in progress, then we will hit this statement and should * start paying attention immediately. */ BUG_ON(priv->inflight != NULL); /* hide the interrupt by switching the IRQ driver to GPIO */ data_disable_interrupts(priv); /* If there are no free buffers, drop this data */ if (list_empty(&priv->free)) { priv->num_dropped++; goto out; } buf = list_first_entry(&priv->free, struct data_buf, entry); list_del_init(&buf->entry); BUG_ON(buf->size != priv->bufsize); /* Submit a DMA transfer to get the correlation data */ if (data_submit_dma(priv, buf)) { dev_err(priv->dev, "Unable to setup DMA transfer\n"); list_move_tail(&buf->entry, &priv->free); goto out; } /* Save the buffer for the DMA callback */ priv->inflight = buf; submitted = true; /* Start the DMA Engine */ dma_async_issue_pending(priv->chan); out: /* If no DMA was submitted, re-enable interrupts */ if (!submitted) data_enable_interrupts(priv); spin_unlock(&priv->lock); return IRQ_HANDLED; } /* * Realtime Device Enable Helpers */ /** * data_device_enable() - enable the device for buffered dumping * @priv: the driver's private data structure * * Enable the device for buffered dumping. Allocates buffers and hooks up * the interrupt handler. When this finishes, data will come pouring in. 
 *
 * LOCKING: must hold dev->mutex
 * CONTEXT: user context only
 *
 * Returns 0 on success, -ERRNO otherwise
 */
static int data_device_enable(struct fpga_device *priv)
{
	bool enabled;
	u32 val;
	int ret;

	/* multiple enables are safe: they do nothing */
	spin_lock_irq(&priv->lock);
	enabled = priv->enabled;
	spin_unlock_irq(&priv->lock);
	if (enabled)
		return 0;

	/* check that the FPGAs are programmed */
	val = ioread32be(priv->regs + SYS_FPGA_CONFIG_STATUS);
	if (!(val & (1 << 18))) {
		dev_err(priv->dev, "DATA-FPGAs are not enabled\n");
		return -ENODATA;
	}

	/* read the FPGAs to calculate the buffer size */
	ret = data_calculate_bufsize(priv);
	if (ret) {
		dev_err(priv->dev, "unable to calculate buffer size\n");
		goto out_error;
	}

	/* allocate the correlation data buffers */
	ret = data_alloc_buffers(priv);
	if (ret) {
		dev_err(priv->dev, "unable to allocate buffers\n");
		goto out_error;
	}

	/* setup the source scatterlist for dumping correlation data */
	ret = data_setup_corl_table(priv);
	if (ret) {
		dev_err(priv->dev, "unable to setup correlation DMA table\n");
		goto out_error;
	}

	/* prevent the FPGAs from generating interrupts */
	data_disable_interrupts(priv);

	/* hookup the irq handler */
	ret = request_irq(priv->irq, data_irq, IRQF_SHARED, drv_name, priv);
	if (ret) {
		dev_err(priv->dev, "unable to request IRQ handler\n");
		goto out_error;
	}

	/* allow the DMA callback to re-enable FPGA interrupts */
	spin_lock_irq(&priv->lock);
	priv->enabled = true;
	spin_unlock_irq(&priv->lock);

	/* allow the FPGAs to generate interrupts */
	data_enable_interrupts(priv);
	return 0;

out_error:
	/*
	 * Shared unwind for all failure points: freeing an sg table or the
	 * buffer lists that were never set up is expected to be harmless
	 * here — NOTE(review): relies on priv being zero-initialized at
	 * allocation; confirm against data_of_probe()'s kzalloc.
	 */
	sg_free_table(&priv->corl_table);
	priv->corl_nents = 0;

	data_free_buffers(priv);
	return ret;
}

/**
 * data_device_disable() - disable the device for buffered dumping
 * @priv: the driver's private data structure
 *
 * Disable the device for buffered dumping. Stops new DMA transactions from
 * being generated, waits for all outstanding DMA to complete, and then frees
 * all buffers.
* * LOCKING: must hold dev->mutex * CONTEXT: user only * * Returns 0 on success, -ERRNO otherwise */ static int data_device_disable(struct fpga_device *priv) { spin_lock_irq(&priv->lock); /* allow multiple disable */ if (!priv->enabled) { spin_unlock_irq(&priv->lock); return 0; } /* * Mark the device disabled * * This stops DMA callbacks from re-enabling interrupts */ priv->enabled = false; /* prevent the FPGAs from generating interrupts */ data_disable_interrupts(priv); /* wait until all ongoing DMA has finished */ while (priv->inflight != NULL) { spin_unlock_irq(&priv->lock); wait_event(priv->wait, priv->inflight == NULL); spin_lock_irq(&priv->lock); } spin_unlock_irq(&priv->lock); /* unhook the irq handler */ free_irq(priv->irq, priv); /* free the correlation table */ sg_free_table(&priv->corl_table); priv->corl_nents = 0; /* free all buffers: the free and used lists are not being changed */ data_free_buffers(priv); return 0; } /* * DEBUGFS Interface */ #ifdef CONFIG_DEBUG_FS /* * Count the number of entries in the given list */ static unsigned int list_num_entries(struct list_head *list) { struct list_head *entry; unsigned int ret = 0; list_for_each(entry, list) ret++; return ret; } static int data_debug_show(struct seq_file *f, void *offset) { struct fpga_device *priv = f->private; spin_lock_irq(&priv->lock); seq_printf(f, "enabled: %d\n", priv->enabled); seq_printf(f, "bufsize: %d\n", priv->bufsize); seq_printf(f, "num_buffers: %d\n", priv->num_buffers); seq_printf(f, "num_free: %d\n", list_num_entries(&priv->free)); seq_printf(f, "inflight: %d\n", priv->inflight != NULL); seq_printf(f, "num_used: %d\n", list_num_entries(&priv->used)); seq_printf(f, "num_dropped: %d\n", priv->num_dropped); spin_unlock_irq(&priv->lock); return 0; } static int data_debug_open(struct inode *inode, struct file *file) { return single_open(file, data_debug_show, inode->i_private); } static const struct file_operations data_debug_fops = { .owner = THIS_MODULE, .open = 
data_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int data_debugfs_init(struct fpga_device *priv)
{
	/*
	 * NOTE(review): debugfs_create_file() historically returns NULL on
	 * failure rather than an ERR_PTR — confirm the IS_ERR() check below
	 * actually catches creation failures on this kernel version.
	 */
	priv->dbg_entry = debugfs_create_file(drv_name, S_IRUGO, NULL, priv,
					      &data_debug_fops);
	if (IS_ERR(priv->dbg_entry))
		return PTR_ERR(priv->dbg_entry);

	return 0;
}

static void data_debugfs_exit(struct fpga_device *priv)
{
	debugfs_remove(priv->dbg_entry);
}

#else

/* no-op stubs when debugfs support is compiled out */
static inline int data_debugfs_init(struct fpga_device *priv)
{
	return 0;
}

static inline void data_debugfs_exit(struct fpga_device *priv)
{
}

#endif	/* CONFIG_DEBUG_FS */

/*
 * SYSFS Attributes
 */

/* show the current enable state ("0\n" or "1\n") */
static ssize_t data_en_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct fpga_device *priv = dev_get_drvdata(dev);
	int ret;

	spin_lock_irq(&priv->lock);
	ret = snprintf(buf, PAGE_SIZE, "%u\n", priv->enabled);
	spin_unlock_irq(&priv->lock);

	return ret;
}

/* parse a 0/non-0 value and disable/enable buffered dumping accordingly */
static ssize_t data_en_set(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fpga_device *priv = dev_get_drvdata(dev);
	unsigned long enable;
	int ret;

	/* NOTE(review): strict_strtoul() is the deprecated name of kstrtoul() */
	ret = strict_strtoul(buf, 0, &enable);
	if (ret) {
		dev_err(priv->dev, "unable to parse enable input\n");
		return -EINVAL;
	}

	/* protect against concurrent enable/disable */
	ret = mutex_lock_interruptible(&priv->mutex);
	if (ret)
		return ret;

	if (enable)
		ret = data_device_enable(priv);
	else
		ret = data_device_disable(priv);

	if (ret) {
		dev_err(priv->dev, "device %s failed\n",
			enable ? "enable" : "disable");
		/*
		 * Propagate the negative errno as the return value.
		 * NOTE(review): count is a size_t, so this relies on the
		 * final ssize_t conversion producing the negative value.
		 */
		count = ret;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&priv->mutex);
	return count;
}

static DEVICE_ATTR(enable, S_IWUSR | S_IRUGO, data_en_show, data_en_set);

static struct attribute *data_sysfs_attrs[] = {
	&dev_attr_enable.attr,
	NULL,
};

static const struct attribute_group rt_sysfs_attr_group = {
	.attrs = data_sysfs_attrs,
};

/*
 * FPGA Realtime Data Character Device
 */

static int data_open(struct inode *inode, struct file *filp)
{
	/*
	 * The miscdevice layer puts our struct miscdevice into the
	 * filp->private_data field. We use this to find our private
	 * data and then overwrite it with our own private structure.
	 */
	struct fpga_device *priv = container_of(filp->private_data,
						struct fpga_device, miscdev);
	struct fpga_reader *reader;
	int ret;

	/* allocate private data */
	reader = kzalloc(sizeof(*reader), GFP_KERNEL);
	if (!reader)
		return -ENOMEM;

	reader->priv = priv;
	reader->buf = NULL;

	filp->private_data = reader;
	ret = nonseekable_open(inode, filp);
	if (ret) {
		dev_err(priv->dev, "nonseekable-open failed\n");
		kfree(reader);
		return ret;
	}

	/*
	 * success, increase the reference count of the private data structure
	 * so that it doesn't disappear if the device is unbound
	 */
	kref_get(&priv->ref);
	return 0;
}

static int data_release(struct inode *inode, struct file *filp)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;

	/* free the per-reader structure */
	data_free_buffer(reader->buf);
	kfree(reader);
	filp->private_data = NULL;

	/* decrement our reference count to the private data */
	kref_put(&priv->ref, fpga_device_release);
	return 0;
}

static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
			 loff_t *f_pos)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;
	struct list_head *used = &priv->used;
	bool drop_buffer = false;
	struct data_buf *dbuf;
	size_t avail;
	void *data;
	int ret;

	/* check if we already have a partial buffer */
	if (reader->buf) {
		dbuf =
reader->buf;
		goto have_buffer;
	}

	spin_lock_irq(&priv->lock);

	/* Block until there is at least one buffer on the used list */
	while (list_empty(used)) {
		spin_unlock_irq(&priv->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(priv->wait, !list_empty(used));
		if (ret)
			return ret;

		spin_lock_irq(&priv->lock);
	}

	/* Grab the first buffer off of the used list */
	dbuf = list_first_entry(used, struct data_buf, entry);
	list_del_init(&dbuf->entry);

	spin_unlock_irq(&priv->lock);

	/* Buffers are always mapped: unmap it */
	videobuf_dma_unmap(priv->dev, &dbuf->vb);

	/* save the buffer for later */
	reader->buf = dbuf;
	reader->buf_start = 0;

have_buffer:
	/* Get the number of bytes available */
	avail = dbuf->size - reader->buf_start;
	data = dbuf->vb.vaddr + reader->buf_start;

	/* Get the number of bytes we can transfer */
	count = min(count, avail);

	/* Copy the data to the userspace buffer */
	if (copy_to_user(ubuf, data, count))
		return -EFAULT;

	/* Update the amount of available space */
	avail -= count;

	/*
	 * If there is still some data available, save the buffer for the
	 * next userspace call to read() and return
	 */
	if (avail > 0) {
		reader->buf_start += count;
		reader->buf = dbuf;
		return count;
	}

	/*
	 * Get the buffer ready to be reused for DMA
	 *
	 * If it fails, we pretend that the read never happened and return
	 * -EFAULT to userspace. The read will be retried.
	 */
	ret = videobuf_dma_map(priv->dev, &dbuf->vb);
	if (ret) {
		dev_err(priv->dev, "unable to remap buffer for DMA\n");
		return -EFAULT;
	}

	/* Lock against concurrent enable/disable */
	spin_lock_irq(&priv->lock);

	/* the reader is finished with this buffer */
	reader->buf = NULL;

	/*
	 * One of two things has happened, the device is disabled, or the
	 * device has been reconfigured underneath us. In either case, we
	 * should just throw away the buffer.
	 *
	 * Lockdep complains if this is done under the spinlock, so we
	 * handle it during the unlock path.
	 */
	if (!priv->enabled || dbuf->size != priv->bufsize) {
		drop_buffer = true;
		goto out_unlock;
	}

	/* The buffer is safe to reuse, so add it back to the free list */
	list_add_tail(&dbuf->entry, &priv->free);

out_unlock:
	spin_unlock_irq(&priv->lock);

	if (drop_buffer) {
		videobuf_dma_unmap(priv->dev, &dbuf->vb);
		data_free_buffer(dbuf);
	}

	return count;
}

static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;
	unsigned int mask = 0;

	poll_wait(filp, &priv->wait, tbl);

	/* readable whenever completed data is waiting on the used list */
	if (!list_empty(&priv->used))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

static int data_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct fpga_reader *reader = filp->private_data;
	struct fpga_device *priv = reader->priv;
	unsigned long offset, vsize, psize, addr;

	/* VMA properties */
	offset = vma->vm_pgoff << PAGE_SHIFT;
	vsize = vma->vm_end - vma->vm_start;
	psize = priv->phys_size - offset;
	addr = (priv->phys_addr + offset) >> PAGE_SHIFT;

	/* Check against the FPGA region's physical memory size */
	if (vsize > psize) {
		dev_err(priv->dev, "requested mmap mapping too large\n");
		return -EINVAL;
	}

	/* device registers: map uncached */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
				  vma->vm_page_prot);
}

static const struct file_operations data_fops = {
	.owner		= THIS_MODULE,
	.open		= data_open,
	.release	= data_release,
	.read		= data_read,
	.poll		= data_poll,
	.mmap		= data_mmap,
	.llseek		= no_llseek,
};

/*
 * OpenFirmware Device Subsystem
 */

static bool dma_filter(struct dma_chan *chan, void *data)
{
	/*
	 * DMA Channel #0 is used for the FPGA Programmer, so ignore it
	 *
	 * This probably won't survive an unload/load cycle of the Freescale
	 * DMAEngine driver, but that won't be a problem
	 */
	if (chan->chan_id == 0 && chan->device->dev_id == 0)
		return false;

	return true;
}

static int data_of_probe(struct platform_device *op)
{
	struct device_node *of_node = op->dev.of_node;
struct device *this_device; struct fpga_device *priv; struct resource res; dma_cap_mask_t mask; int ret; /* Allocate private data */ priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(&op->dev, "Unable to allocate device private data\n"); ret = -ENOMEM; goto out_return; } dev_set_drvdata(&op->dev, priv); priv->dev = &op->dev; kref_init(&priv->ref); mutex_init(&priv->mutex); dev_set_drvdata(priv->dev, priv); spin_lock_init(&priv->lock); INIT_LIST_HEAD(&priv->free); INIT_LIST_HEAD(&priv->used); init_waitqueue_head(&priv->wait); /* Setup the misc device */ priv->miscdev.minor = MISC_DYNAMIC_MINOR; priv->miscdev.name = drv_name; priv->miscdev.fops = &data_fops; /* Get the physical address of the FPGA registers */ ret = of_address_to_resource(of_node, 0, &res); if (ret) { dev_err(&op->dev, "Unable to find FPGA physical address\n"); ret = -ENODEV; goto out_free_priv; } priv->phys_addr = res.start; priv->phys_size = resource_size(&res); /* ioremap the registers for use */ priv->regs = of_iomap(of_node, 0); if (!priv->regs) { dev_err(&op->dev, "Unable to ioremap registers\n"); ret = -ENOMEM; goto out_free_priv; } dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); dma_cap_set(DMA_INTERRUPT, mask); dma_cap_set(DMA_SLAVE, mask); dma_cap_set(DMA_SG, mask); /* Request a DMA channel */ priv->chan = dma_request_channel(mask, dma_filter, NULL); if (!priv->chan) { dev_err(&op->dev, "Unable to request DMA channel\n"); ret = -ENODEV; goto out_unmap_regs; } /* Find the correct IRQ number */ priv->irq = irq_of_parse_and_map(of_node, 0); if (priv->irq == NO_IRQ) { dev_err(&op->dev, "Unable to find IRQ line\n"); ret = -ENODEV; goto out_release_dma; } /* Drive the GPIO for FPGA IRQ high (no interrupt) */ iowrite32be(IRQ_CORL_DONE, priv->regs + SYS_IRQ_OUTPUT_DATA); /* Register the miscdevice */ ret = misc_register(&priv->miscdev); if (ret) { dev_err(&op->dev, "Unable to register miscdevice\n"); goto out_irq_dispose_mapping; } /* Create the debugfs files */ ret = 
data_debugfs_init(priv); if (ret) { dev_err(&op->dev, "Unable to create debugfs files\n"); goto out_misc_deregister; } /* Create the sysfs files */ this_device = priv->miscdev.this_device; dev_set_drvdata(this_device, priv); ret = sysfs_create_group(&this_device->kobj, &rt_sysfs_attr_group); if (ret) { dev_err(&op->dev, "Unable to create sysfs files\n"); goto out_data_debugfs_exit; } dev_info(&op->dev, "CARMA FPGA Realtime Data Driver Loaded\n"); return 0; out_data_debugfs_exit: data_debugfs_exit(priv); out_misc_deregister: misc_deregister(&priv->miscdev); out_irq_dispose_mapping: irq_dispose_mapping(priv->irq); out_release_dma: dma_release_channel(priv->chan); out_unmap_regs: iounmap(priv->regs); out_free_priv: kref_put(&priv->ref, fpga_device_release); out_return: return ret; } static int data_of_remove(struct platform_device *op) { struct fpga_device *priv = dev_get_drvdata(&op->dev); struct device *this_device = priv->miscdev.this_device; /* remove all sysfs files, now the device cannot be re-enabled */ sysfs_remove_group(&this_device->kobj, &rt_sysfs_attr_group); /* remove all debugfs files */ data_debugfs_exit(priv); /* disable the device from generating data */ data_device_disable(priv); /* remove the character device to stop new readers from appearing */ misc_deregister(&priv->miscdev); /* cleanup everything not needed by readers */ irq_dispose_mapping(priv->irq); dma_release_channel(priv->chan); iounmap(priv->regs); /* release our reference */ kref_put(&priv->ref, fpga_device_release); return 0; } static struct of_device_id data_of_match[] = { { .compatible = "carma,carma-fpga", }, {}, }; static struct platform_driver data_of_driver = { .probe = data_of_probe, .remove = data_of_remove, .driver = { .name = drv_name, .of_match_table = data_of_match, .owner = THIS_MODULE, }, }; module_platform_driver(data_of_driver); MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); MODULE_DESCRIPTION("CARMA DATA-FPGA Access Driver"); MODULE_LICENSE("GPL");
gpl-2.0
kumajaya/android_kernel_samsung_universal5422
drivers/acpi/acpica/utosi.c
2150
12716
/****************************************************************************** * * Module Name: utosi - Support for the _OSI predefined control method * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utosi") /* * Strings supported by the _OSI predefined control method (which is * implemented internally within this module.) * * March 2009: Removed "Linux" as this host no longer wants to respond true * for this string. Basically, the only safe OS strings are windows-related * and in many or most cases represent the only test path within the * BIOS-provided ASL code. * * The last element of each entry is used to track the newest version of * Windows that the BIOS has requested. 
 */
static struct acpi_interface_info acpi_default_supported_interfaces[] = {
	/* Operating System Vendor Strings */

	{"Windows 2000", NULL, 0, ACPI_OSI_WIN_2000},	/* Windows 2000 */
	{"Windows 2001", NULL, 0, ACPI_OSI_WIN_XP},	/* Windows XP */
	{"Windows 2001 SP1", NULL, 0, ACPI_OSI_WIN_XP_SP1},	/* Windows XP SP1 */
	{"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003},	/* Windows Server 2003 */
	{"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2},	/* Windows XP SP2 */
	{"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1},	/* Windows Server 2003 SP1 - Added 03/2006 */
	{"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA},	/* Windows vista - Added 03/2006 */
	{"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008},	/* Windows Server 2008 - Added 09/2009 */
	{"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1},	/* Windows Vista SP1 - Added 09/2009 */
	{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},	/* Windows Vista SP2 - Added 09/2010 */
	{"Windows 2009", NULL, 0, ACPI_OSI_WIN_7},	/* Windows 7 and Server 2008 R2 - Added 09/2009 */
	{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8},	/* Windows 8 and Server 2012 - Added 08/2012 */

	/* Feature Group Strings */

	{"Extended Address Space Descriptor", NULL, 0, 0}

	/*
	 * All "optional" feature group strings (features that are implemented
	 * by the host) should be dynamically added by the host via
	 * acpi_install_interface and should not be manually added here.
	 *
	 * Examples of optional feature group strings:
	 *
	 * "Module Device"
	 * "Processor Device"
	 * "3.0 Thermal Model"
	 * "3.0 _SCP Extensions"
	 * "Processor Aggregator Device"
	 */
};

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_initialize_interfaces
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the global _OSI supported interfaces list
 *
 ******************************************************************************/

acpi_status acpi_ut_initialize_interfaces(void)
{
	acpi_status status;
	u32 i;

	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;

	/*
	 * Link the static list of supported interfaces. The loop stops one
	 * short of the array length so the last element's next pointer
	 * stays NULL, terminating the list.
	 */
	for (i = 0;
	     i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
	     i++) {
		acpi_default_supported_interfaces[i].next =
		    &acpi_default_supported_interfaces[(acpi_size) i + 1];
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_interface_terminate
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all interfaces in the global list. Sets
 *              acpi_gbl_supported_interfaces to NULL.
 *
 ******************************************************************************/

acpi_status acpi_ut_interface_terminate(void)
{
	acpi_status status;
	struct acpi_interface_info *next_interface;

	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Walk the list head-first, unlinking each node as we go */
	next_interface = acpi_gbl_supported_interfaces;
	while (next_interface) {
		acpi_gbl_supported_interfaces = next_interface->next;

		/* Only interfaces added at runtime can be freed */

		if (next_interface->flags & ACPI_OSI_DYNAMIC) {
			ACPI_FREE(next_interface->name);
			ACPI_FREE(next_interface);
		}

		next_interface = acpi_gbl_supported_interfaces;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_install_interface
 *
 * PARAMETERS:  interface_name      - The interface to install
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install the interface into the global interface list.
 *              Caller MUST hold acpi_gbl_osi_mutex
 *
 ******************************************************************************/

acpi_status acpi_ut_install_interface(acpi_string interface_name)
{
	struct acpi_interface_info *interface_info;

	/* Allocate info block and space for the name string */

	interface_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_interface_info));
	if (!interface_info) {
		return (AE_NO_MEMORY);
	}

	interface_info->name =
	    ACPI_ALLOCATE_ZEROED(ACPI_STRLEN(interface_name) + 1);
	if (!interface_info->name) {
		ACPI_FREE(interface_info);
		return (AE_NO_MEMORY);
	}

	/* Initialize new info and insert at the head of the global list */

	ACPI_STRCPY(interface_info->name, interface_name);
	interface_info->flags = ACPI_OSI_DYNAMIC;
	interface_info->next = acpi_gbl_supported_interfaces;

	acpi_gbl_supported_interfaces = interface_info;
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_remove_interface
 *
 * PARAMETERS:  interface_name      - The interface to remove
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove the interface from the global interface list.
 *              Caller MUST hold acpi_gbl_osi_mutex
 *
 ******************************************************************************/

acpi_status acpi_ut_remove_interface(acpi_string interface_name)
{
	struct acpi_interface_info *previous_interface;
	struct acpi_interface_info *next_interface;

	previous_interface = next_interface = acpi_gbl_supported_interfaces;
	while (next_interface) {
		if (!ACPI_STRCMP(interface_name, next_interface->name)) {

			/* Found: name is in either the static list or was added at runtime */

			if (next_interface->flags & ACPI_OSI_DYNAMIC) {

				/* Interface was added dynamically, remove and free it */

				if (previous_interface == next_interface) {
					acpi_gbl_supported_interfaces =
					    next_interface->next;
				} else {
					previous_interface->next =
					    next_interface->next;
				}

				ACPI_FREE(next_interface->name);
				ACPI_FREE(next_interface);
			} else {
				/*
				 * Interface is in static list. If marked invalid, then it
				 * does not actually exist. Else, mark it invalid.
				 */
				if (next_interface->flags & ACPI_OSI_INVALID) {
					return (AE_NOT_EXIST);
				}

				next_interface->flags |= ACPI_OSI_INVALID;
			}

			return (AE_OK);
		}

		previous_interface = next_interface;
		next_interface = next_interface->next;
	}

	/* Interface was not found */

	return (AE_NOT_EXIST);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_get_interface
 *
 * PARAMETERS:  interface_name      - The interface to find
 *
 * RETURN:      struct acpi_interface_info if found. NULL if not found.
 *
 * DESCRIPTION: Search for the specified interface name in the global list.
 * Caller MUST hold acpi_gbl_osi_mutex
 *
 ******************************************************************************/

struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
{
	struct acpi_interface_info *next_interface;

	/* Linear scan of the singly-linked global interface list */

	next_interface = acpi_gbl_supported_interfaces;
	while (next_interface) {
		if (!ACPI_STRCMP(interface_name, next_interface->name)) {
			return (next_interface);
		}

		next_interface = next_interface->next;
	}

	return (NULL);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ut_osi_implementation
 *
 * PARAMETERS:  walk_state          - Current walk state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Implementation of the _OSI predefined control method. When
 *              an invocation of _OSI is encountered in the system AML,
 *              control is transferred to this function.
 *              Returns an integer object to the AML caller:
 *              ACPI_UINT32_MAX if the interface is supported, 0 otherwise
 *              (either value may be overridden by the host's optional
 *              interface handler).
 *
 ******************************************************************************/

acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
{
	union acpi_operand_object *string_desc;
	union acpi_operand_object *return_desc;
	struct acpi_interface_info *interface_info;
	acpi_interface_handler interface_handler;
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ut_osi_implementation);

	/* Validate the string input argument (from the AML caller) */

	string_desc = walk_state->arguments[0].object;
	if (!string_desc || (string_desc->common.type != ACPI_TYPE_STRING)) {
		return_ACPI_STATUS(AE_TYPE);
	}

	/* Create a return object */

	return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
	if (!return_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Default return value is 0, NOT SUPPORTED */

	return_value = 0;
	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
	if (ACPI_FAILURE(status)) {
		/* Drop the reference taken by object creation before bailing */
		acpi_ut_remove_reference(return_desc);
		return_ACPI_STATUS(status);
	}

	/* Lookup the interface in the global _OSI list */

	interface_info = acpi_ut_get_interface(string_desc->string.pointer);
	if (interface_info && !(interface_info->flags & ACPI_OSI_INVALID)) {
		/*
		 * The interface is supported.
		 * Update the osi_data if necessary. We keep track of the latest
		 * version of Windows that has been requested by the BIOS.
		 */
		if (interface_info->value > acpi_gbl_osi_data) {
			acpi_gbl_osi_data = interface_info->value;
		}

		return_value = ACPI_UINT32_MAX;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);

	/*
	 * Invoke an optional _OSI interface handler. The host OS may wish
	 * to do some interface-specific handling. For example, warn about
	 * certain interfaces or override the true/false support value.
	 * Note the handler runs outside the OSI mutex, with the AML-owned
	 * string argument (not a list entry, so removal cannot free it).
	 */
	interface_handler = acpi_gbl_interface_handler;
	if (interface_handler) {
		return_value =
		    interface_handler(string_desc->string.pointer,
				      return_value);
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
			      "ACPI: BIOS _OSI(\"%s\") is %ssupported\n",
			      string_desc->string.pointer,
			      return_value == 0 ? "not " : ""));

	/* Complete the return object */

	return_desc->integer.value = return_value;
	walk_state->return_desc = return_desc;
	return_ACPI_STATUS(AE_OK);
}
gpl-2.0
advx9600/kernel-rp4412
drivers/gpu/drm/radeon/radeon_atpx_handler.c
2662
7275
/*
 * Copyright (c) 2010 Red Hat Inc.
 * Author : Dave Airlie <airlied@redhat.com>
 *
 * Licensed under GPLv2
 *
 * ATPX support for both Intel/ATI
 */
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/pci.h>

/* ATPX method function codes */
#define ATPX_VERSION 0
#define ATPX_GPU_PWR 2
#define ATPX_MUX_SELECT 3
#define ATPX_I2C_MUX_SELECT 4
#define ATPX_SWITCH_START 5
#define ATPX_SWITCH_END 6

#define ATPX_INTEGRATED 0
#define ATPX_DISCRETE 1

#define ATPX_MUX_IGD 0
#define ATPX_MUX_DISCRETE 1

static struct radeon_atpx_priv {
	bool atpx_detected;
	/* handle for device - and atpx */
	acpi_handle dhandle;
	acpi_handle atpx_handle;
	acpi_handle atrm_handle;
} radeon_atpx_priv;

/*
 * Retrieve a chunk of the discrete GPU's video BIOS via the ATRM ACPI
 * method (the ROM is fetched in 4k blocks).
 *
 * Returns the number of bytes actually copied into @bios at @offset,
 * or a negative errno on failure.
 */
static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
			    int offset, int len)
{
	acpi_status status;
	union acpi_object atrm_arg_elements[2], *obj;
	struct acpi_object_list atrm_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};

	atrm_arg.count = 2;
	atrm_arg.pointer = &atrm_arg_elements[0];

	atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atrm_arg_elements[0].integer.value = offset;

	atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
	atrm_arg_elements[1].integer.value = len;

	status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR "failed to evaluate ATRM got %s\n",
		       acpi_format_exception(status));
		return -ENODEV;
	}

	obj = (union acpi_object *)buffer.pointer;

	/*
	 * Fix: validate what the firmware handed back before copying.
	 * Previously the requested length was memcpy'd unconditionally,
	 * which reads past the end of the returned ACPI buffer when the
	 * BIOS returns a short (or non-buffer) object.
	 */
	if (obj->type != ACPI_TYPE_BUFFER) {
		printk(KERN_ERR "ATRM returned a non-buffer object\n");
		kfree(buffer.pointer);
		return -ENODEV;
	}
	if (obj->buffer.length < (u32)len)
		len = obj->buffer.length;

	memcpy(bios+offset, obj->buffer.pointer, len);
	kfree(buffer.pointer);
	return len;
}

/* True when the BIOS for @pdev must be fetched through ATRM. */
bool radeon_atrm_supported(struct pci_dev *pdev)
{
	/* get the discrete ROM only via ATRM */
	if (!radeon_atpx_priv.atpx_detected)
		return false;

	if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
		return false;
	return true;
}

/* Fetch @len bytes of the discrete BIOS starting at @offset. */
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
	return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios,
				offset, len);
}

/* Query and log the ATPX interface version (informational only). */
static int radeon_atpx_get_version(acpi_handle handle)
{
	acpi_status status;
	union acpi_object atpx_arg_elements[2], *obj;
	struct acpi_object_list atpx_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };

	atpx_arg.count = 2;
	atpx_arg.pointer = &atpx_arg_elements[0];

	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atpx_arg_elements[0].integer.value = ATPX_VERSION;

	atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
	atpx_arg_elements[1].integer.value = ATPX_VERSION;

	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR "%s: failed to call ATPX: %s\n",
		       __func__, acpi_format_exception(status));
		return -ENOSYS;
	}
	obj = (union acpi_object *)buffer.pointer;
	/* version byte lives at offset 2 of the returned buffer */
	if (obj && (obj->type == ACPI_TYPE_BUFFER) && obj->buffer.length >= 3)
		printk(KERN_INFO "radeon atpx: version is %d\n",
		       *((u8 *)(obj->buffer.pointer) + 2));
	kfree(buffer.pointer);
	return 0;
}

/*
 * Execute one ATPX command. @value is packed little-endian into bytes
 * 2-3 of the 4-byte argument buffer, as the firmware interface expects.
 */
static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
{
	acpi_status status;
	union acpi_object atpx_arg_elements[2];
	struct acpi_object_list atpx_arg;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	uint8_t buf[4] = {0};

	if (!handle)
		return -EINVAL;

	atpx_arg.count = 2;
	atpx_arg.pointer = &atpx_arg_elements[0];

	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
	atpx_arg_elements[0].integer.value = cmd_id;

	buf[2] = value & 0xff;
	buf[3] = (value >> 8) & 0xff;

	atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
	atpx_arg_elements[1].buffer.length = 4;
	atpx_arg_elements[1].buffer.pointer = buf;

	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR "%s: failed to call ATPX: %s\n",
		       __func__, acpi_format_exception(status));
		return -ENOSYS;
	}
	kfree(buffer.pointer);

	return 0;
}

static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
{
	return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
}

static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
{
	return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
}

static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id)
{
	return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id);
}

static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id)
{
	return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id);
}

static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id)
{
	return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id);
}

/* vga_switcheroo callback: route display/i2c muxes to the chosen GPU. */
static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
{
	int gpu_id;

	if (id == VGA_SWITCHEROO_IGD)
		gpu_id = ATPX_INTEGRATED;
	else
		gpu_id = ATPX_DISCRETE;

	radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id);
	radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id);
	radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id);
	radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id);

	return 0;
}

/* vga_switcheroo callback: power the discrete GPU up/down. */
static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
				   enum vga_switcheroo_state state)
{
	/* on w500 ACPI can't change intel gpu state */
	if (id == VGA_SWITCHEROO_IGD)
		return 0;

	radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
	return 0;
}

/*
 * Probe one VGA device for the ATPX and ATRM methods; on success cache
 * all three ACPI handles in radeon_atpx_priv.
 */
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
	acpi_handle dhandle, atpx_handle, atrm_handle;
	acpi_status status;

	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
	if (!dhandle)
		return false;

	status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
	if (ACPI_FAILURE(status))
		return false;

	status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
	if (ACPI_FAILURE(status))
		return false;

	radeon_atpx_priv.dhandle = dhandle;
	radeon_atpx_priv.atpx_handle = atpx_handle;
	radeon_atpx_priv.atrm_handle = atrm_handle;
	return true;
}

static int radeon_atpx_init(void)
{
	/* set up the ATPX handle */
	radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
	return 0;
}

/* vga_switcheroo callback: identify which client a pci_dev is. */
static int radeon_atpx_get_client_id(struct pci_dev *pdev)
{
	if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
		return VGA_SWITCHEROO_IGD;
	else
		return VGA_SWITCHEROO_DIS;
}

static struct vga_switcheroo_handler radeon_atpx_handler = {
	.switchto = radeon_atpx_switchto,
	.power_state = radeon_atpx_power_state,
	.init = radeon_atpx_init,
	.get_client_id = radeon_atpx_get_client_id,
};

/*
 * Scan all VGA-class PCI devices; ATPX switching is usable only when a
 * device exposes ATPX/ATRM *and* exactly two VGA controllers exist.
 */
static bool radeon_atpx_detect(void)
{
	char acpi_method_name[255] = { 0 };
	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
	struct pci_dev *pdev = NULL;
	bool has_atpx = false;
	int vga_count = 0;

	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
		vga_count++;

		has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
	}

	if (has_atpx && vga_count == 2) {
		acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
		printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
		       acpi_method_name);
		radeon_atpx_priv.atpx_detected = true;
		return true;
	}
	return false;
}

void radeon_register_atpx_handler(void)
{
	bool r;

	/* detect if we have any ATPX + 2 VGA in the system */
	r = radeon_atpx_detect();
	if (!r)
		return;

	vga_switcheroo_register_handler(&radeon_atpx_handler);
}

void radeon_unregister_atpx_handler(void)
{
	vga_switcheroo_unregister_handler();
}
gpl-2.0
maniacx/android_kernel_htcleo-3.0_older
drivers/hid/hid-prodikeys.c
2662
21506
/*
 * HID driver for the Prodikeys PC-MIDI Keyboard
 * providing midi & extra multimedia keys functionality
 *
 * Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk>
 *
 * Controls for Octave Shift Up/Down, Channel, and
 * Sustain Duration available via sysfs.
 *
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/mutex.h>
#include <linux/hid.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
#include "usbhid/usbhid.h"
#include "hid-ids.h"

#define pk_debug(format, arg...) \
	pr_debug("hid-prodikeys: " format "\n" , ## arg)
#define pk_error(format, arg...) \
	pr_err("hid-prodikeys: " format "\n" , ## arg)

struct pcmidi_snd;

/* Per-HID-device context; ties the hid_device to its sound context. */
struct pk_device {
	unsigned long		quirks;	/* copied from id->driver_data */

	struct hid_device	*hdev;
	struct pcmidi_snd	*pm; /* pcmidi device context */
};

struct pcmidi_snd;

/* One pending (sustained) note-off, released later by a timer. */
struct pcmidi_sustain {
	unsigned long		in_use;		/* slot occupied flag */
	struct pcmidi_snd	*pm;		/* back-pointer for the timer cb */
	struct timer_list	timer;
	unsigned char		status;		/* MIDI status byte to send */
	unsigned char		note;
	unsigned char		velocity;
};

#define PCMIDI_SUSTAINED_MAX	32

/* State of the virtual raw-MIDI sound card built on the HID reports. */
struct pcmidi_snd {
	struct pk_device		*pk;
	unsigned short			ifnum;		/* USB interface number */
	struct hid_report		*pcmidi_report6; /* output report id 6 */
	struct input_dev		*input_ep82;	/* extra multimedia keys */
	unsigned short			midi_mode;	/* 1 = keys act as MIDI */
	unsigned short			midi_sustain_mode;
	unsigned short			midi_sustain;	/* sustain duration, ms */
	unsigned short			midi_channel;	/* 0..15 */
	short				midi_octave;	/* -2..2 octave shift */
	struct pcmidi_sustain		sustained_notes[PCMIDI_SUSTAINED_MAX];
	unsigned short			fn_state;
	unsigned short			last_key[24];	/* currently-down keys by bit */
	spinlock_t			rawmidi_in_lock; /* guards in_substream */
	struct snd_card			*card;
	struct snd_rawmidi		*rwmidi;
	struct snd_rawmidi_substream	*in_substream;
	struct snd_rawmidi_substream	*out_substream;
	unsigned long			in_triggered;	/* bitmap of triggered substreams */
	unsigned long			out_active;
};

#define PK_QUIRK_NOGET	0x00010000
#define PCMIDI_MIDDLE_C 60
#define PCMIDI_CHANNEL_MIN 0
#define PCMIDI_CHANNEL_MAX 15
#define PCMIDI_OCTAVE_MIN (-2)
#define PCMIDI_OCTAVE_MAX 2
#define PCMIDI_SUSTAIN_MIN 0
#define PCMIDI_SUSTAIN_MAX 5000

static const char shortname[] = "PC-MIDI";
static const char longname[] = "Prodikeys PC-MIDI Keyboard";

/* Standard ALSA card module parameters */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;

/* NOTE(review): 'enable' is declared int[] but registered with type bool;
 * confirm against module_param_array() type checking of the target kernel. */
module_param_array(index, int, NULL, 0444);
module_param_array(id, charp, NULL, 0444);
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver");
MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver");
MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver");

/* Output routine for the sysfs channel file */
static ssize_t show_channel(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);

	dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel);

	return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel,
		PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX);
}

/* Input routine for the sysfs channel file */
static ssize_t store_channel(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);

	unsigned channel = 0;

	if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) {
		dbg_hid("pcmidi sysfs write channel=%u\n", channel);
		pk->pm->midi_channel = channel;
		/* NOTE(review): sysfs stores conventionally return 'count';
		 * strlen(buf) is usually equal here but not guaranteed. */
		return strlen(buf);
	}
	return -EINVAL;
}

static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR | S_IWGRP , show_channel,
		store_channel);

static struct device_attribute *sysfs_device_attr_channel = {
	&dev_attr_channel,
	};

/* Output routine for the sysfs sustain file */
static ssize_t show_sustain(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);

	dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain);

	return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain,
		PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX);
}

/* Input routine for the sysfs sustain file */
static ssize_t store_sustain(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);

	unsigned sustain = 0;

	if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) {
		dbg_hid("pcmidi sysfs write sustain=%u\n", sustain);
		pk->pm->midi_sustain = sustain;
		/* sustain mode only engages while MIDI mode is active */
		pk->pm->midi_sustain_mode =
			(0 == sustain || !pk->pm->midi_mode) ? 0 : 1;
		return strlen(buf);
	}
	return -EINVAL;
}

static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR | S_IWGRP, show_sustain,
		store_sustain);

static struct device_attribute *sysfs_device_attr_sustain = {
	&dev_attr_sustain,
	};

/* Output routine for the sysfs octave file */
static ssize_t show_octave(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);

	dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave);

	return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave,
		PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX);
}

/* Input routine for the sysfs octave file */
static ssize_t store_octave(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);

	int octave = 0;

	if (sscanf(buf, "%d", &octave) > 0 &&
		octave >= PCMIDI_OCTAVE_MIN && octave <= PCMIDI_OCTAVE_MAX) {
		dbg_hid("pcmidi sysfs write octave=%d\n", octave);
		pk->pm->midi_octave = octave;
		return strlen(buf);
	}
	return -EINVAL;
}

static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR | S_IWGRP, show_octave,
		store_octave);

static struct device_attribute *sysfs_device_attr_octave = {
	&dev_attr_octave,
	};

/*
 * Deliver one 3-byte MIDI message to the rawmidi input substream.
 * The note is silently dropped unless the substream is open and
 * triggered; the spinlock guards against concurrent close.
 */
static void pcmidi_send_note(struct pcmidi_snd *pm,
	unsigned char status, unsigned char note, unsigned char velocity)
{
	unsigned long flags;
	unsigned char buffer[3];

	buffer[0] = status;
	buffer[1] = note;
	buffer[2] = velocity;

	spin_lock_irqsave(&pm->rawmidi_in_lock, flags);

	if (!pm->in_substream)
		goto drop_note;
	if (!test_bit(pm->in_substream->number, &pm->in_triggered))
		goto drop_note;

	snd_rawmidi_receive(pm->in_substream, buffer, 3);

drop_note:
	spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags);

	return;
}

/* Timer callback: emit the deferred note-off and free the sustain slot. */
void pcmidi_sustained_note_release(unsigned long data)
{
	struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;

	pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
	pms->in_use = 0;
}

void init_sustain_timers(struct pcmidi_snd *pm)
{
	struct pcmidi_sustain *pms;
	unsigned i;

	for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
		pms = &pm->sustained_notes[i];
		pms->in_use = 0;
		pms->pm = pm;
		setup_timer(&pms->timer, pcmidi_sustained_note_release,
			(unsigned long)pms);
	}
}

/* Marks every slot in_use so no new timers get armed, then syncs them. */
void stop_sustain_timers(struct pcmidi_snd *pm)
{
	struct pcmidi_sustain *pms;
	unsigned i;

	for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
		pms = &pm->sustained_notes[i];
		pms->in_use = 1;
		del_timer_sync(&pms->timer);
	}
}

/* Locate and sanity-check HID output report id 6; cache it in pm. */
static int pcmidi_get_output_report(struct pcmidi_snd *pm)
{
	struct hid_device *hdev = pm->pk->hdev;
	struct hid_report *report;

	list_for_each_entry(report,
		&hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) {
		if (!(6 == report->id))
			continue;

		if (report->maxfield < 1) {
			hid_err(hdev, "output report is empty\n");
			break;
		}
		if (report->field[0]->report_count != 2) {
			hid_err(hdev, "field count too low\n");
			break;
		}
		pm->pcmidi_report6 = report;
		return 0;
	}
	/* should never get here */
	return -ENODEV;
}

/* Send a control byte to the keyboard via the cached output report. */
static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state)
{
	struct hid_device *hdev = pm->pk->hdev;
	struct hid_report *report = pm->pcmidi_report6;
	report->field[0]->value[0] = 0x01;
	report->field[0]->value[1] = state;

	usbhid_submit_report(hdev, report, USB_DIR_OUT);
}

/*
 * Report 1: qwerty keys that double as MIDI controls while midi_mode
 * is active. Returns 1 when the event was consumed, 0 to continue
 * normal key processing.
 */
static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data)
{
	u32 bit_mask;

	bit_mask = data[1];
	bit_mask = (bit_mask << 8) | data[2];
	bit_mask = (bit_mask << 8) | data[3];

	dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
	/*KEY_MAIL or octave down*/
	if (pm->midi_mode && bit_mask == 0x004000) {
		/* octave down */
		/* NOTE(review): hard-coded -2 duplicates PCMIDI_OCTAVE_MIN */
		pm->midi_octave--;
		if (pm->midi_octave < -2)
			pm->midi_octave = -2;
		dbg_hid("pcmidi mode: %d octave: %d\n",
			pm->midi_mode, pm->midi_octave);
		return 1;
	}
	/*KEY_WWW or sustain*/
	else if (pm->midi_mode && bit_mask == 0x000004) {
		/* sustain on/off*/
		pm->midi_sustain_mode ^= 0x1;
		return 1;
	}

	return 0; /* continue key processing */
}

/*
 * Report 3: the musical keyboard. Each report carries (size-1)/2
 * note/velocity pairs; notes < 0x81 are note-on, others note-off.
 * In sustain mode a note-off is deferred via a timer slot.
 */
static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size)
{
	struct pcmidi_sustain *pms;
	unsigned i, j;
	unsigned char status, note, velocity;

	unsigned num_notes = (size-1)/2;
	for (j = 0; j < num_notes; j++)	{
		note = data[j*2+1];
		velocity = data[j*2+2];

		if (note < 0x81) { /* note on */
			status = 128 + 16 + pm->midi_channel; /* 1001nnnn */
			note = note - 0x54 + PCMIDI_MIDDLE_C +
				(pm->midi_octave * 12);
			if (0 == velocity)
				velocity = 1; /* force note on */
		} else { /* note off */
			status = 128 + pm->midi_channel; /* 1000nnnn */
			note = note - 0x94 + PCMIDI_MIDDLE_C +
				(pm->midi_octave*12);

			if (pm->midi_sustain_mode) {
				for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) {
					pms = &pm->sustained_notes[i];
					if (!pms->in_use) {
						pms->status = status;
						pms->note = note;
						pms->velocity = velocity;
						pms->in_use = 1;
						mod_timer(&pms->timer,
							jiffies +
					msecs_to_jiffies(pm->midi_sustain));
						/* NOTE(review): this early
						 * return skips any remaining
						 * notes in this report —
						 * confirm intended. */
						return 1;
					}
				}
			}
		}
		pcmidi_send_note(pm, status, note, velocity);
	}

	return 1;
}

/*
 * Report 4: multimedia keys (24-bit bitmask). Break events release
 * whatever was recorded in last_key[]; make events either toggle the
 * internal Fn/midi modes or emit an input key event.
 */
static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
{
	unsigned key;
	u32 bit_mask;
	u32 bit_index;

	bit_mask = data[1];
	bit_mask = (bit_mask << 8) | data[2];
	bit_mask = (bit_mask << 8) | data[3];

	/* break keys */
	for (bit_index = 0; bit_index < 24; bit_index++) {
		key = pm->last_key[bit_index];
		if (!((0x01 << bit_index) & bit_mask)) {
			input_event(pm->input_ep82, EV_KEY,
				pm->last_key[bit_index], 0);
			pm->last_key[bit_index] = 0;
		}
	}

	/* make keys */
	for (bit_index = 0; bit_index < 24; bit_index++) {
		key = 0;
		switch ((0x01 << bit_index) & bit_mask) {
		case 0x000010: /* Fn lock*/
			pm->fn_state ^= 0x000010;
			if (pm->fn_state)
				pcmidi_submit_output_report(pm, 0xc5);
			else
				pcmidi_submit_output_report(pm, 0xc6);
			continue;
		case 0x020000: /* midi launcher..send a key (qwerty) or not? */
			pcmidi_submit_output_report(pm, 0xc1);
			pm->midi_mode ^= 0x01;

			dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
			continue;
		case 0x100000: /* KEY_MESSENGER or octave up */
			dbg_hid("pcmidi mode: %d\n", pm->midi_mode);
			if (pm->midi_mode) {
				pm->midi_octave++;
				if (pm->midi_octave > 2)
					pm->midi_octave = 2;
				dbg_hid("pcmidi mode: %d octave: %d\n",
					pm->midi_mode, pm->midi_octave);
				continue;
			} else
				key = KEY_MESSENGER;
			break;
		case 0x400000:
			key = KEY_CALENDAR;
			break;
		case 0x080000:
			key = KEY_ADDRESSBOOK;
			break;
		case 0x040000:
			key = KEY_DOCUMENTS;
			break;
		case 0x800000:
			key = KEY_WORDPROCESSOR;
			break;
		case 0x200000:
			key = KEY_SPREADSHEET;
			break;
		case 0x010000:
			key = KEY_COFFEE;
			break;
		case 0x000100:
			key = KEY_HELP;
			break;
		case 0x000200:
			key = KEY_SEND;
			break;
		case 0x000400:
			key = KEY_REPLY;
			break;
		case 0x000800:
			key = KEY_FORWARDMAIL;
			break;
		case 0x001000:
			key = KEY_NEW;
			break;
		case 0x002000:
			key = KEY_OPEN;
			break;
		case 0x004000:
			key = KEY_CLOSE;
			break;
		case 0x008000:
			key = KEY_SAVE;
			break;
		case 0x000001:
			key = KEY_UNDO;
			break;
		case 0x000002:
			key = KEY_REDO;
			break;
		case 0x000004:
			key = KEY_SPELLCHECK;
			break;
		case 0x000008:
			key = KEY_PRINT;
			break;
		}
		if (key) {
			input_event(pm->input_ep82, EV_KEY, key, 1);
			pm->last_key[bit_index] = key;
		}
	}

	return 1;
}

/* Dispatch a raw HID report to the matching handler above. */
int pcmidi_handle_report(
	struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size)
{
	int ret = 0;

	switch (report_id) {
	case 0x01: /* midi keys (qwerty)*/
		ret = pcmidi_handle_report1(pm, data);
		break;
	case 0x03: /* midi keyboard (musical)*/
		ret = pcmidi_handle_report3(pm, data, size);
		break;
	case 0x04: /* multimedia/midi keys (qwerty)*/
		ret = pcmidi_handle_report4(pm, data);
		break;
	}
	return ret;
}

/* Declare the extra multimedia key capabilities on interface 1's input dev. */
void pcmidi_setup_extra_keys(
	struct pcmidi_snd *pm, struct input_dev *input)
{
	/* reassigned functionality for N/A keys
		MY PICTURES =>	KEY_WORDPROCESSOR
		MY MUSIC=>	KEY_SPREADSHEET
	*/
	unsigned int keys[] = {
		KEY_FN,
		KEY_MESSENGER, KEY_CALENDAR,
		KEY_ADDRESSBOOK, KEY_DOCUMENTS,
		KEY_WORDPROCESSOR,
		KEY_SPREADSHEET,
		KEY_COFFEE,
		KEY_HELP, KEY_SEND,
		KEY_REPLY, KEY_FORWARDMAIL,
		KEY_NEW, KEY_OPEN,
		KEY_CLOSE, KEY_SAVE,
		KEY_UNDO, KEY_REDO,
		KEY_SPELLCHECK,	KEY_PRINT,
		0
	};

	unsigned int *pkeys = &keys[0];
	unsigned short i;

	if (pm->ifnum != 1) /* only set up ONCE for interace 1 */
		return;

	pm->input_ep82 = input;

	for (i = 0; i < 24; i++)
		pm->last_key[i] = 0;

	while (*pkeys != 0) {
		set_bit(*pkeys, pm->input_ep82->keybit);
		++pkeys;
	}
}

static int pcmidi_set_operational(struct pcmidi_snd *pm)
{
	if (pm->ifnum != 1)
		return 0; /* only set up ONCE for interace 1 */

	pcmidi_get_output_report(pm);
	pcmidi_submit_output_report(pm, 0xc1);
	return 0;
}

static int pcmidi_snd_free(struct snd_device *dev)
{
	return 0;
}

static int pcmidi_in_open(struct snd_rawmidi_substream *substream)
{
	struct pcmidi_snd *pm = substream->rmidi->private_data;

	dbg_hid("pcmidi in open\n");
	pm->in_substream = substream;
	return 0;
}

static int pcmidi_in_close(struct snd_rawmidi_substream *substream)
{
	dbg_hid("pcmidi in close\n");
	return 0;
}

static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up)
{
	struct pcmidi_snd *pm = substream->rmidi->private_data;

	dbg_hid("pcmidi in trigger %d\n", up);

	pm->in_triggered = up;
}

static struct snd_rawmidi_ops pcmidi_in_ops = {
	.open = pcmidi_in_open,
	.close = pcmidi_in_close,
	.trigger = pcmidi_in_trigger
};

/*
 * Create the ALSA card, rawmidi device, and sysfs attributes for the
 * MIDI interface (interface 1 only). On any failure, unwinds in reverse
 * order via the labelled goto chain; snd_card_free() releases the card
 * and its attached snd_device/rawmidi.
 */
int pcmidi_snd_initialise(struct pcmidi_snd *pm)
{
	static int dev;
	struct snd_card *card;
	struct snd_rawmidi *rwmidi;
	int err;

	static struct snd_device_ops ops = {
		.dev_free = pcmidi_snd_free,
	};

	if (pm->ifnum != 1)
		return 0; /* only set up midi device ONCE for interace 1 */

	if (dev >= SNDRV_CARDS)
		return -ENODEV;

	if (!enable[dev]) {
		dev++;
		return -ENOENT;
	}

	/* Setup sound card */

	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0) {
		pk_error("failed to create pc-midi sound card\n");
		/* NOTE(review): real error code from snd_card_create is
		 * discarded and replaced by -ENOMEM */
		err = -ENOMEM;
		goto fail;
	}
	pm->card = card;

	/* Setup sound device */
	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops);
	if (err < 0) {
		pk_error("failed to create pc-midi sound device: error %d\n",
			err);
		goto fail;
	}

	strncpy(card->driver, shortname, sizeof(card->driver));
	strncpy(card->shortname, shortname, sizeof(card->shortname));
	strncpy(card->longname, longname, sizeof(card->longname));

	/* Set up rawmidi */
	err = snd_rawmidi_new(card, card->shortname, 0,
			      0, 1, &rwmidi);
	if (err < 0) {
		pk_error("failed to create pc-midi rawmidi device: error %d\n",
			err);
		goto fail;
	}
	pm->rwmidi = rwmidi;
	strncpy(rwmidi->name, card->shortname, sizeof(rwmidi->name));
	rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT;
	rwmidi->private_data = pm;

	snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT,
		&pcmidi_in_ops);

	snd_card_set_dev(card, &pm->pk->hdev->dev);

	/* create sysfs variables */
	err = device_create_file(&pm->pk->hdev->dev,
				 sysfs_device_attr_channel);
	if (err < 0) {
		pk_error("failed to create sysfs attribute channel: error %d\n",
			err);
		goto fail;
	}

	err = device_create_file(&pm->pk->hdev->dev,
				sysfs_device_attr_sustain);
	if (err < 0) {
		pk_error("failed to create sysfs attribute sustain: error %d\n",
			err);
		goto fail_attr_sustain;
	}

	err = device_create_file(&pm->pk->hdev->dev,
			 sysfs_device_attr_octave);
	if (err < 0) {
		pk_error("failed to create sysfs attribute octave: error %d\n",
			err);
		goto fail_attr_octave;
	}

	spin_lock_init(&pm->rawmidi_in_lock);

	init_sustain_timers(pm);
	pcmidi_set_operational(pm);

	/* register it */
	err = snd_card_register(card);
	if (err < 0) {
		pk_error("failed to register pc-midi sound card: error %d\n",
			 err);
		goto fail_register;
	}

	dbg_hid("pcmidi_snd_initialise finished ok\n");
	return 0;

fail_register:
	stop_sustain_timers(pm);
	device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave);
fail_attr_octave:
	device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain);
fail_attr_sustain:
	device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel);
fail:
	if (pm->card) {
		snd_card_free(pm->card);
		pm->card = NULL;
	}
	return err;
}

/* Tear down everything pcmidi_snd_initialise() created. */
int pcmidi_snd_terminate(struct pcmidi_snd *pm)
{
	if (pm->card) {
		stop_sustain_timers(pm);

		device_remove_file(&pm->pk->hdev->dev,
			sysfs_device_attr_channel);
		device_remove_file(&pm->pk->hdev->dev,
			sysfs_device_attr_sustain);
		device_remove_file(&pm->pk->hdev->dev,
			sysfs_device_attr_octave);

		snd_card_disconnect(pm->card);
		snd_card_free_when_closed(pm->card);
	}

	return 0;
}

/*
 * PC-MIDI report descriptor for report id is wrong.
 */
static __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc,
		unsigned int *rsize)
{
	if (*rsize == 178 &&
	      rdesc[111] == 0x06 && rdesc[112] == 0x00 &&
	      rdesc[113] == 0xff) {
		hid_info(hdev,
			"fixing up pc-midi keyboard report descriptor\n");

		rdesc[144] = 0x18; /* report 4: was 0x10 report count */
	}
	return rdesc;
}

/* Hook input mapping only to register the extra keys on interface 1. */
static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
	struct pcmidi_snd *pm;

	pm = pk->pm;

	if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) &&
			1 == pm->ifnum) {
		pcmidi_setup_extra_keys(pm, hi->input);
		/* NOTE(review): both paths return 0 (default mapping);
		 * confirm vendor usages are not meant to be ignored (-1). */
		return 0;
	}

	return 0;
}

/* Raw-event hook: route interface-1 reports 1/3/4 to the MIDI logic. */
static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
	u8 *data, int size)
{
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
	int ret = 0;

	if (1 == pk->pm->ifnum) {
		if (report->id == data[0])
			switch (report->id) {
			case 0x01: /* midi keys (qwerty)*/
			case 0x03: /* midi keyboard (musical)*/
			case 0x04: /* extra/midi keys (qwerty)*/
				ret = pcmidi_handle_report(pk->pm,
						report->id, data, size);
				break;
			}
	}

	return ret;
}

/* Allocate contexts, start the HID device, and bring up the MIDI card. */
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	unsigned long quirks = id->driver_data;
	struct pk_device *pk;
	struct pcmidi_snd *pm = NULL;

	pk = kzalloc(sizeof(*pk), GFP_KERNEL);
	if (pk == NULL) {
		hid_err(hdev, "can't alloc descriptor\n");
		return -ENOMEM;
	}

	pk->hdev = hdev;

	pm = kzalloc(sizeof(*pm), GFP_KERNEL);
	if (pm == NULL) {
		hid_err(hdev, "can't alloc descriptor\n");
		ret = -ENOMEM;
		goto err_free;
	}

	pm->pk = pk;
	pk->pm = pm;
	pm->ifnum = ifnum;

	hid_set_drvdata(hdev, pk);

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "hid parse failed\n");
		goto err_free;
	}

	if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */
		hdev->quirks |= HID_QUIRK_NOGET;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err_free;
	}

	ret = pcmidi_snd_initialise(pm);
	if (ret < 0)
		goto err_stop;

	return 0;
err_stop:
	hid_hw_stop(hdev);
err_free:
	if (pm != NULL)
		kfree(pm);

	kfree(pk);
	return ret;
}

static void pk_remove(struct hid_device *hdev)
{
	struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev);
	struct pcmidi_snd *pm;

	pm = pk->pm;
	if (pm) {
		pcmidi_snd_terminate(pm);
		kfree(pm);
	}

	hid_hw_stop(hdev);

	kfree(pk);
}

static const struct hid_device_id pk_devices[] = {
	{HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS,
		USB_DEVICE_ID_PRODIKEYS_PCMIDI),
	    .driver_data = PK_QUIRK_NOGET},
	{ }
};
MODULE_DEVICE_TABLE(hid, pk_devices);

static struct hid_driver pk_driver = {
	.name = "prodikeys",
	.id_table = pk_devices,
	.report_fixup = pk_report_fixup,
	.input_mapping = pk_input_mapping,
	.raw_event = pk_raw_event,
	.probe = pk_probe,
	.remove = pk_remove,
};

static int pk_init(void)
{
	int ret;

	ret = hid_register_driver(&pk_driver);
	if (ret)
		pr_err("can't register prodikeys driver\n");

	return ret;
}

static void pk_exit(void)
{
	hid_unregister_driver(&pk_driver);
}

module_init(pk_init);
module_exit(pk_exit);
MODULE_LICENSE("GPL");
gpl-2.0
Windeal/Linux-3.12.36
arch/blackfin/kernel/dumpstack.c
4198
4386
/* Provide basic stack dumping functions
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/trace.h>

/*
 * Checks to see if the address pointed to is either a
 * 16-bit CALL instruction, or a 32-bit CALL instruction
 */
static bool is_bfin_call(unsigned short *addr)
{
	unsigned int opcode;

	/* get_instruction() fails for unreadable addresses */
	if (!get_instruction(&opcode, addr))
		return false;

	/* 16-bit CALL (0x006x / 0x007x) or 32-bit CALL (0xE3xxxxxx) */
	if ((opcode >= 0x0060 && opcode <= 0x0067) ||
	    (opcode >= 0x0070 && opcode <= 0x0077) ||
	    (opcode >= 0xE3000000 && opcode <= 0xE3FFFFFF))
		return true;

	return false;
}

/*
 * Dump a kernel stack: locate a plausible frame pointer by scanning for
 * words that look like return addresses (the word before a CALL insn),
 * then print the raw memory and the decoded return addresses.
 * Heuristic by design — odd/zero words are skipped, candidate frame
 * chains are validated by walking them.
 */
void show_stack(struct task_struct *task, unsigned long *stack)
{
#ifdef CONFIG_PRINTK
	/* NOTE(review): pointer locals initialized with 0 rather than NULL */
	unsigned int *addr, *endstack, *fp = 0, *frame;
	unsigned short *ins_addr;
	char buf[150];
	unsigned int i, j, ret_addr, frame_no = 0;

	/*
	 * If we have been passed a specific stack, use that one otherwise
	 *    if we have been passed a task structure, use that, otherwise
	 *    use the stack of where the variable "stack" exists
	 */

	if (stack == NULL) {
		if (task) {
			/* We know this is a kernel stack, so this is the start/end */
			stack = (unsigned long *)task->thread.ksp;
			endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
		} else {
			/* print out the existing stack info */
			stack = (unsigned long *)&stack;
			endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
		}
	} else
		endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);

	printk(KERN_NOTICE "Stack info:\n");
	decode_address(buf, (unsigned int)stack);
	printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);

	/* bail out before touching memory we cannot read */
	if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
		printk(KERN_NOTICE "Invalid stack pointer\n");
		return;
	}

	/* First thing is to look for a frame pointer */
	for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
		if (*addr & 0x1)
			continue;
		ins_addr = (unsigned short *)*addr;
		/* step back one 16-bit slot: is the previous insn a CALL? */
		ins_addr--;
		if (is_bfin_call(ins_addr))
			fp = addr - 1;

		if (fp) {
			/* Let's check to see if it is a frame pointer */
			while (fp >= (addr - 1) && fp < endstack
			       && fp && ((unsigned int) fp & 0x3) == 0)
				fp = (unsigned int *)*fp;
			if (fp == 0 || fp == endstack) {
				fp = addr - 1;
				break;
			}
			fp = 0;
		}
	}
	if (fp) {
		frame = fp;
		printk(KERN_NOTICE " FP: (0x%p)\n", fp);
	} else
		frame = 0;

	/*
	 * Now that we think we know where things are, we
	 * walk the stack again, this time printing things out
	 * incase there is no frame pointer, we still look for
	 * valid return addresses
	 */

	/* First time print out data, next time, print out symbols */
	for (j = 0; j <= 1; j++) {
		if (j)
			printk(KERN_NOTICE "Return addresses in stack:\n");
		else
			printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);

		fp = frame;
		frame_no = 0;

		for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
		     addr < endstack; addr++, i++) {

			ret_addr = 0;
			if (!j && i % 8 == 0)
				printk(KERN_NOTICE "%p:", addr);

			/* if it is an odd address, or zero, just skip it */
			if (*addr & 0x1 || !*addr)
				goto print;

			ins_addr = (unsigned short *)*addr;

			/* Go back one instruction, and see if it is a CALL */
			ins_addr--;
			ret_addr = is_bfin_call(ins_addr);
 print:
			if (!j && stack == (unsigned long *)addr)
				printk("[%08x]", *addr);
			else if (ret_addr)
				if (j) {
					decode_address(buf, (unsigned int)*addr);
					if (frame == addr) {
						printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
						continue;
					}
					printk(KERN_NOTICE " address : %s\n", buf);
				} else
					printk("<%08x>", *addr);
			else if (fp == addr) {
				if (j)
					frame = addr+1;
				else
					printk("(%08x)", *addr);

				/* follow the frame chain to the next frame */
				fp = (unsigned int *)*addr;
				frame_no++;

			} else if (!j)
				printk(" %08x ", *addr);
		}
		if (!j)
			printk("\n");
	}
#endif
}
EXPORT_SYMBOL(show_stack);

/*
 * Dump the hardware trace buffer and the current stack.  The trace buffer
 * is saved/restored around the dump so the dump itself does not pollute it
 * (trace_buffer_save/restore are no-ops when hardware tracing is off, which
 * is why tflags is only declared under CONFIG_DEBUG_BFIN_HWTRACE_ON).
 */
void dump_stack(void)
{
	unsigned long stack;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags;
#endif
	trace_buffer_save(tflags);
	dump_bfin_trace_buffer();
	dump_stack_print_info(KERN_DEFAULT);
	show_stack(current, &stack);
	trace_buffer_restore(tflags);
}
EXPORT_SYMBOL(dump_stack);
gpl-2.0
SomethingExplosive/android_kernel_asus_flo
samples/rpmsg/rpmsg_client_sample.c
4966
2689
/* * Remote processor messaging - sample client driver * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2011 Google, Inc. * * Ohad Ben-Cohen <ohad@wizery.com> * Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/rpmsg.h> #define MSG "hello world!" #define MSG_LIMIT 100 static void rpmsg_sample_cb(struct rpmsg_channel *rpdev, void *data, int len, void *priv, u32 src) { int ret; static int rx_count; dev_info(&rpdev->dev, "incoming msg %d (src: 0x%x)\n", ++rx_count, src); print_hex_dump(KERN_DEBUG, __func__, DUMP_PREFIX_NONE, 16, 1, data, len, true); /* samples should not live forever */ if (rx_count >= MSG_LIMIT) { dev_info(&rpdev->dev, "goodbye!\n"); return; } /* send a new message now */ ret = rpmsg_send(rpdev, MSG, strlen(MSG)); if (ret) dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret); } static int rpmsg_sample_probe(struct rpmsg_channel *rpdev) { int ret; dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x!\n", rpdev->src, rpdev->dst); /* send a message to our remote processor */ ret = rpmsg_send(rpdev, MSG, strlen(MSG)); if (ret) { dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret); return ret; } return 0; } static void __devexit rpmsg_sample_remove(struct rpmsg_channel *rpdev) { dev_info(&rpdev->dev, "rpmsg sample client driver is removed\n"); } static struct rpmsg_device_id rpmsg_driver_sample_id_table[] = { { .name = "rpmsg-client-sample" }, { }, }; MODULE_DEVICE_TABLE(rpmsg, rpmsg_driver_sample_id_table); static struct rpmsg_driver 
rpmsg_sample_client = { .drv.name = KBUILD_MODNAME, .drv.owner = THIS_MODULE, .id_table = rpmsg_driver_sample_id_table, .probe = rpmsg_sample_probe, .callback = rpmsg_sample_cb, .remove = __devexit_p(rpmsg_sample_remove), }; static int __init rpmsg_client_sample_init(void) { return register_rpmsg_driver(&rpmsg_sample_client); } module_init(rpmsg_client_sample_init); static void __exit rpmsg_client_sample_fini(void) { unregister_rpmsg_driver(&rpmsg_sample_client); } module_exit(rpmsg_client_sample_fini); MODULE_DESCRIPTION("Remote processor messaging sample client driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
zanezam/boeffla-kernel-oos-bacon
arch/powerpc/mm/init_32.c
6758
5716
/* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com) * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/initrd.h> #include <linux/pagemap.h> #include <linux/memblock.h> #include <linux/gfp.h> #include <linux/slab.h> #include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/prom.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/btext.h> #include <asm/tlb.h> #include <asm/sections.h> #include <asm/hugetlb.h> #include "mmu_decl.h" #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) /* The amount of lowmem must be within 0xF0000000 - KERNELBASE. 
*/ #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET)) #error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL" #endif #endif #define MAX_LOW_MEM CONFIG_LOWMEM_SIZE phys_addr_t total_memory; phys_addr_t total_lowmem; phys_addr_t memstart_addr = (phys_addr_t)~0ull; EXPORT_SYMBOL(memstart_addr); phys_addr_t kernstart_addr; EXPORT_SYMBOL(kernstart_addr); #ifdef CONFIG_RELOCATABLE_PPC32 /* Used in __va()/__pa() */ long long virt_phys_offset; EXPORT_SYMBOL(virt_phys_offset); #endif phys_addr_t lowmem_end_addr; int boot_mapsize; #ifdef CONFIG_PPC_PMAC unsigned long agp_special_page; EXPORT_SYMBOL(agp_special_page); #endif void MMU_init(void); /* XXX should be in current.h -- paulus */ extern struct task_struct *current_set[NR_CPUS]; /* * this tells the system to map all of ram with the segregs * (i.e. page tables) instead of the bats. * -- Cort */ int __map_without_bats; int __map_without_ltlbs; /* * This tells the system to allow ioremapping memory marked as reserved. */ int __allow_ioremap_reserved; /* max amount of low RAM to map in */ unsigned long __max_low_memory = MAX_LOW_MEM; /* * Check for command-line options that affect what MMU_init will do. */ void MMU_setup(void) { /* Check for nobats option (used in mapin_ram). */ if (strstr(cmd_line, "nobats")) { __map_without_bats = 1; } if (strstr(cmd_line, "noltlbs")) { __map_without_ltlbs = 1; } #ifdef CONFIG_DEBUG_PAGEALLOC __map_without_bats = 1; __map_without_ltlbs = 1; #endif } /* * MMU_init sets up the basic memory mappings for the kernel, * including both RAM and possibly some I/O regions, * and sets up the page tables and the MMU hardware ready to go. */ void __init MMU_init(void) { if (ppc_md.progress) ppc_md.progress("MMU:enter", 0x111); /* parse args from command line */ MMU_setup(); /* * Reserve gigantic pages for hugetlb. This MUST occur before * lowmem_end_addr is initialized below. 
*/ reserve_hugetlb_gpages(); if (memblock.memory.cnt > 1) { #ifndef CONFIG_WII memblock_enforce_memory_limit(memblock.memory.regions[0].size); printk(KERN_WARNING "Only using first contiguous memory region"); #else wii_memory_fixups(); #endif } total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr; lowmem_end_addr = memstart_addr + total_lowmem; #ifdef CONFIG_FSL_BOOKE /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB * entries, so we need to adjust lowmem to match the amount we can map * in the fixed entries */ adjust_total_lowmem(); #endif /* CONFIG_FSL_BOOKE */ if (total_lowmem > __max_low_memory) { total_lowmem = __max_low_memory; lowmem_end_addr = memstart_addr + total_lowmem; #ifndef CONFIG_HIGHMEM total_memory = total_lowmem; memblock_enforce_memory_limit(total_lowmem); #endif /* CONFIG_HIGHMEM */ } /* Initialize the MMU hardware */ if (ppc_md.progress) ppc_md.progress("MMU:hw init", 0x300); MMU_init_hw(); /* Map in all of RAM starting at KERNELBASE */ if (ppc_md.progress) ppc_md.progress("MMU:mapin", 0x301); mapin_ram(); /* Initialize early top-down ioremap allocator */ ioremap_bot = IOREMAP_TOP; /* Map in I/O resources */ if (ppc_md.progress) ppc_md.progress("MMU:setio", 0x302); if (ppc_md.progress) ppc_md.progress("MMU:exit", 0x211); /* From now on, btext is no longer BAT mapped if it was at all */ #ifdef CONFIG_BOOTX_TEXT btext_unmap(); #endif /* Shortly after that, the entire linear mapping will be available */ memblock_set_current_limit(lowmem_end_addr); } /* This is only called until mem_init is done. */ void __init *early_get_page(void) { if (init_bootmem_done) return alloc_bootmem_pages(PAGE_SIZE); else return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); } #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... 
*/ void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { /* We don't currently support the first MEMBLOCK not mapping 0 * physical on those processors */ BUG_ON(first_memblock_base != 0); /* 8xx can only access 8MB at the moment */ memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); } #endif /* CONFIG_8xx */
gpl-2.0
zombi-x/android_kernel_lge_mako
arch/powerpc/kernel/softemu8xx.c
6758
4758
/*
 * Software emulation of some PPC instructions for the 8xx core.
 *
 * Copyright (C) 1998 Dan Malek (dmalek@jlc.net)
 *
 * Software floating emuation for the MPC8xx processor.  I did this mostly
 * because it was easier than trying to get the libraries compiled for
 * software floating point.  The goal is still to get the libraries done,
 * but I lost patience and needed some hacks to at least get init and
 * shells running.  The first problem is the setjmp/longjmp that save
 * and restore the floating point registers.
 *
 * For this emulation, our working registers are found on the register
 * save area.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* Eventually we may need a look-up table, but this works for now.
 * Primary opcodes (bits 0-5) of the FP load/store forms we emulate.
 */
#define LFS	48
#define LFD	50
#define LFDU	51
#define STFD	54
#define STFDU	55
#define FMR	63

/*
 * Debug helper: walk the page tables for 'addr' in 'mm' and print the PTE
 * plus a decode of the 8xx-specific PTE bits (RPN, PP, SPS, SH, CI, valid).
 */
void print_8xx_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	printk(" pte @ 0x%8lx: ", addr);
	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),
				 addr & PAGE_MASK);
		if (pmd && pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				printk(" (0x%08lx)->(0x%08lx)->0x%08lx\n",
					(long)pgd, (long)pte,
					(long)pte_val(*pte));
#define pp ((long)pte_val(*pte))
				printk(" RPN: %05lx PP: %lx SPS: %lx SH: %lx "
				       "CI: %lx v: %lx\n",
				       pp>>12,    /* rpn */
				       (pp>>10)&3, /* pp */
				       (pp>>3)&1, /* small */
				       (pp>>2)&1, /* shared */
				       (pp>>1)&1, /* cache inhibit */
				       pp&1       /* valid */
				       );
#undef pp
			} else {
				printk("no pte\n");
			}
		} else {
			printk("no pmd\n");
		}
	} else {
		printk("no pgd\n");
	}
}

/*
 * Return the raw PTE value for 'addr' in 'mm', or 0 when any level of the
 * page-table walk is missing.
 */
int get_8xx_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),
				 addr & PAGE_MASK);
		if (pmd && pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = (int)pte_val(*pte);
			}
		}
	}
	return retval;
}

/*
 * We return 0 on success, 1 on unimplemented instruction, and EFAULT
 * if a load/store faulted.
 *
 * Decodes the faulting instruction at regs->nip and emulates the FP
 * load/store (LFS/LFD/LFDU/STFD/STFDU) or register move (FMR) against the
 * thread's saved FP register area.  On success the NIP is advanced past
 * the emulated instruction.
 */
int Soft_emulate_8xx(struct pt_regs *regs)
{
	u32 inst, instword;
	u32 flreg, idxreg, disp;
	int retval;
	s16 sdisp;
	u32 *ea, *ip;

	retval = 0;

	instword = *((u32 *)regs->nip);
	inst = instword >> 26;	/* primary opcode */

	flreg = (instword >> 21) & 0x1f;	/* FP reg field */
	idxreg = (instword >> 16) & 0x1f;	/* base GPR field */
	disp = instword & 0xffff;		/* raw (unsigned) displacement */

	/*
	 * NOTE(review): this 'ea' uses the *unsigned* displacement; the
	 * LFD/LFS/STFD cases below recompute it with the sign-extended
	 * 'sdisp', but LFDU/STFDU use this one as-is, so a negative
	 * displacement in the update forms would be mishandled — confirm
	 * that this is intended before touching it.
	 */
	ea = (u32 *)(regs->gpr[idxreg] + disp);
	ip = (u32 *)&current->thread.TS_FPR(flreg);

	switch ( inst ) {
	case LFD:
		/* this is a 16 bit quantity that is sign extended
		 * so use a signed short here -- Cort
		 */
		sdisp = (instword & 0xffff);
		ea = (u32 *)(regs->gpr[idxreg] + sdisp);
		if (copy_from_user(ip, ea, sizeof(double)))
			retval = -EFAULT;
		break;
	case LFDU:
		if (copy_from_user(ip, ea, sizeof(double)))
			retval = -EFAULT;
		else
			/* update form: write the effective address back */
			regs->gpr[idxreg] = (u32)ea;
		break;
	case LFS:
		sdisp = (instword & 0xffff);
		ea = (u32 *)(regs->gpr[idxreg] + sdisp);
		if (copy_from_user(ip, ea, sizeof(float)))
			retval = -EFAULT;
		break;
	case STFD:
		/* this is a 16 bit quantity that is sign extended
		 * so use a signed short here -- Cort
		 */
		sdisp = (instword & 0xffff);
		ea = (u32 *)(regs->gpr[idxreg] + sdisp);
		if (copy_to_user(ea, ip, sizeof(double)))
			retval = -EFAULT;
		break;
	case STFDU:
		if (copy_to_user(ea, ip, sizeof(double)))
			retval = -EFAULT;
		else
			/* update form: write the effective address back */
			regs->gpr[idxreg] = (u32)ea;
		break;
	case FMR:
		/* assume this is a fp move -- Cort */
		memcpy(ip, &current->thread.TS_FPR((instword>>11)&0x1f),
			sizeof(double));
		break;
	default:
		retval = 1;
		printk("Bad emulation %s/%d\n"
			" NIP: %08lx instruction: %08x opcode: %x "
			"A: %x B: %x C: %x code: %x rc: %x\n",
			current->comm,current->pid,
			regs->nip,
			instword,inst,
			(instword>>16)&0x1f,
			(instword>>11)&0x1f, (instword>>6)&0x1f,
			(instword>>1)&0x3ff,
			instword&1);
		{
			int pa;
			print_8xx_pte(current->mm,regs->nip);
			/* reconstruct the kernel VA of the faulting insn */
			pa = get_8xx_pte(current->mm,regs->nip) & PAGE_MASK;
			pa |= (regs->nip & ~PAGE_MASK);
			pa = (unsigned long)__va(pa);
			printk("Kernel VA for NIP %x ", pa);
			print_8xx_pte(current->mm,pa);
		}
	}

	/* only advance past the instruction when emulation succeeded */
	if (retval == 0)
		regs->nip += 4;

	return retval;
}
gpl-2.0
mythos234/cmkernel_zeroltexx
arch/mips/pci/pci-tx4939.c
7782
2672
/*
 * Based on linux/arch/mips/txx9/rbtx4939/setup.c,
 *	    and RBTX49xx patch from CELF patch archive.
 *
 * Copyright 2001, 2003-2005 MontaVista Software Inc.
 * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
 * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx4939.h>

/*
 * Report the PCI clock configuration of the main PCI controller.
 * Returns the clock rate in Hz for an internally generated PCICLK,
 * or -1 when the clock is supplied externally.
 */
int __init tx4939_report_pciclk(void)
{
	int clk = -1;

	pr_info("PCIC --%s PCICLK:",
		(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCI66)
		? " PCI66" : "");

	if (__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_PCICLKEN_ALL) {
		/* internal clock: master clock x 20/6, halved unless PCI66 */
		clk = txx9_master_clock * 20 / 6;
		if (!(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCI66))
			clk /= 2;
		printk(KERN_CONT "Internal(%u.%uMHz)",
		       (clk + 50000) / 1000000,
		       ((clk + 50000) / 100000) % 10);
	} else {
		printk(KERN_CONT "External");
	}
	printk(KERN_CONT "\n");
	return clk;
}

/* Report the (always internal) PCI clock of the secondary controller. */
void __init tx4939_report_pci1clk(void)
{
	unsigned int clk = txx9_master_clock * 20 / 6;

	pr_info("PCIC1 -- PCICLK:%u.%uMHz\n",
		(clk + 50000) / 1000000,
		((clk + 50000) / 100000) % 10);
}

/*
 * IRQ lookup for devices behind the secondary controller (PCIC1).
 * Returns the ethernet IRQ for the on-chip MACs when they are enabled,
 * 0 for other PCIC1 slots, and -1 when the device is not on PCIC1.
 */
int __init tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
{
	if (get_tx4927_pcicptr(dev->bus->sysdata) != tx4939_pcic1ptr)
		return -1;

	if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(31)) {
		if (__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_ET0MODE)
			return TXX9_IRQ_BASE + TX4939_IR_ETH(0);
	} else if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(30)) {
		if (__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_ET1MODE)
			return TXX9_IRQ_BASE + TX4939_IR_ETH(1);
	}
	return 0;
}

/*
 * Map a PCI interrupt pin to a CPU IRQ.  PCIC1 devices are handled by
 * tx4939_pcic1_map_irq(); everything else gets the standard per-slot
 * INTA-INTD rotation.
 */
int __init tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	int irq = tx4939_pcic1_map_irq(dev, slot);

	if (irq >= 0)
		return irq;

	/* rotate pin (1-4) by slot, staying in the 1-4 range */
	irq = ((pin - 1 + 33 - slot) % 4) + 1;

	switch (irq) {
	case 1:
		irq = TXX9_IRQ_BASE + TX4939_IR_INTA;
		break;
	case 2:
		irq = TXX9_IRQ_BASE + TX4939_IR_INTB;
		break;
	case 3:
		irq = TXX9_IRQ_BASE + TX4939_IR_INTC;
		break;
	case 4:
		irq = TXX9_IRQ_BASE + TX4939_IR_INTD;
		break;
	}
	return irq;
}

/* Register the PCI error interrupt handler shared with the TX4927 code. */
void __init tx4939_setup_pcierr_irq(void)
{
	if (request_irq(TXX9_IRQ_BASE + TX4939_IR_PCIERR,
			tx4927_pcierr_interrupt,
			0, "PCI error",
			(void *)TX4939_PCIC_REG))
		pr_warning("Failed to request irq for PCIERR\n");
}
gpl-2.0
whdgmawkd/NindiKernel
drivers/message/i2o/i2o_config.c
8038
26831
/* * I2O Configuration Interface Driver * * (C) Copyright 1999-2002 Red Hat * * Written by Alan Cox, Building Number Three Ltd * * Fixes/additions: * Deepak Saxena (04/20/1999): * Added basic ioctl() support * Deepak Saxena (06/07/1999): * Added software download ioctl (still testing) * Auvo Häkkinen (09/10/1999): * Changes to i2o_cfg_reply(), ioctl_parms() * Added ioct_validate() * Taneli Vähäkangas (09/30/1999): * Fixed ioctl_swdl() * Taneli Vähäkangas (10/04/1999): * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel() * Deepak Saxena (11/18/1999): * Added event managmenet support * Alan Cox <alan@lxorguk.ukuu.org.uk>: * 2.4 rewrite ported to 2.5 * Markus Lidel <Markus.Lidel@shadowconnect.com>: * Added pass-thru support for Adaptec's raidutils * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/compat.h> #include <linux/slab.h> #include <asm/uaccess.h> #include "core.h" #define SG_TABLESIZE 30 static DEFINE_MUTEX(i2o_cfg_mutex); static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long); static spinlock_t i2o_config_lock; #define MODINC(x,y) ((x) = ((x) + 1) % (y)) struct sg_simple_element { u32 flag_count; u32 addr_bus; }; struct i2o_cfg_info { struct file *fp; struct fasync_struct *fasync; struct i2o_evt_info event_q[I2O_EVT_Q_LEN]; u16 q_in; // Queue head index u16 q_out; // Queue tail index u16 q_len; // Queue length u16 q_lost; // Number of lost events ulong q_id; // Event queue ID...used as tx_context struct i2o_cfg_info *next; }; static struct i2o_cfg_info *open_files = NULL; static ulong i2o_cfg_info_id = 0; static int i2o_cfg_getiops(unsigned long arg) { struct i2o_controller *c; u8 __user *user_iop_table = (void __user *)arg; u8 tmp[MAX_I2O_CONTROLLERS]; int ret = 0; memset(tmp, 0, MAX_I2O_CONTROLLERS); list_for_each_entry(c, &i2o_controllers, list) tmp[c->unit] = 1; if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS)) ret = -EFAULT; return ret; }; static int i2o_cfg_gethrt(unsigned long arg) { struct i2o_controller *c; struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg; struct i2o_cmd_hrtlct kcmd; i2o_hrt *hrt; int len; u32 reslen; int ret = 0; if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) return -EFAULT; if (get_user(reslen, kcmd.reslen) < 0) return -EFAULT; if (kcmd.resbuf == NULL) return -EFAULT; c = i2o_find_iop(kcmd.iop); if (!c) return -ENXIO; hrt = (i2o_hrt *) c->hrt.virt; len = 8 + ((hrt->entry_len * hrt->num_entries) << 2); if (put_user(len, kcmd.reslen)) ret = -EFAULT; else if (len > reslen) ret = -ENOBUFS; else if (copy_to_user(kcmd.resbuf, (void *)hrt, len)) ret = -EFAULT; return ret; }; static int i2o_cfg_getlct(unsigned long arg) { struct i2o_controller *c; struct i2o_cmd_hrtlct __user *cmd = (struct 
i2o_cmd_hrtlct __user *)arg; struct i2o_cmd_hrtlct kcmd; i2o_lct *lct; int len; int ret = 0; u32 reslen; if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) return -EFAULT; if (get_user(reslen, kcmd.reslen) < 0) return -EFAULT; if (kcmd.resbuf == NULL) return -EFAULT; c = i2o_find_iop(kcmd.iop); if (!c) return -ENXIO; lct = (i2o_lct *) c->lct; len = (unsigned int)lct->table_size << 2; if (put_user(len, kcmd.reslen)) ret = -EFAULT; else if (len > reslen) ret = -ENOBUFS; else if (copy_to_user(kcmd.resbuf, lct, len)) ret = -EFAULT; return ret; }; static int i2o_cfg_parms(unsigned long arg, unsigned int type) { int ret = 0; struct i2o_controller *c; struct i2o_device *dev; struct i2o_cmd_psetget __user *cmd = (struct i2o_cmd_psetget __user *)arg; struct i2o_cmd_psetget kcmd; u32 reslen; u8 *ops; u8 *res; int len = 0; u32 i2o_cmd = (type == I2OPARMGET ? I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET); if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget))) return -EFAULT; if (get_user(reslen, kcmd.reslen)) return -EFAULT; c = i2o_find_iop(kcmd.iop); if (!c) return -ENXIO; dev = i2o_iop_find_device(c, kcmd.tid); if (!dev) return -ENXIO; ops = memdup_user(kcmd.opbuf, kcmd.oplen); if (IS_ERR(ops)) return PTR_ERR(ops); /* * It's possible to have a _very_ large table * and that the user asks for all of it at once... 
*/ res = kmalloc(65536, GFP_KERNEL); if (!res) { kfree(ops); return -ENOMEM; } len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536); kfree(ops); if (len < 0) { kfree(res); return -EAGAIN; } if (put_user(len, kcmd.reslen)) ret = -EFAULT; else if (len > reslen) ret = -ENOBUFS; else if (copy_to_user(kcmd.resbuf, res, len)) ret = -EFAULT; kfree(res); return ret; }; static int i2o_cfg_swdl(unsigned long arg) { struct i2o_sw_xfer kxfer; struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; unsigned char maxfrag = 0, curfrag = 1; struct i2o_dma buffer; struct i2o_message *msg; unsigned int status = 0, swlen = 0, fragsize = 8192; struct i2o_controller *c; if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) return -EFAULT; if (get_user(swlen, kxfer.swlen) < 0) return -EFAULT; if (get_user(maxfrag, kxfer.maxfrag) < 0) return -EFAULT; if (get_user(curfrag, kxfer.curfrag) < 0) return -EFAULT; if (curfrag == maxfrag) fragsize = swlen - (maxfrag - 1) * 8192; if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize)) return -EFAULT; c = i2o_find_iop(kxfer.iop); if (!c) return -ENXIO; msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) { i2o_msg_nop(c, msg); return -ENOMEM; } if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) { i2o_msg_nop(c, msg); i2o_dma_free(&c->pdev->dev, &buffer); return -EFAULT; } msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); msg->u.head[1] = cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID); msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); msg->u.head[3] = cpu_to_le32(0); msg->body[0] = cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer. 
sw_type) << 16) | (((u32) maxfrag) << 8) | (((u32) curfrag))); msg->body[1] = cpu_to_le32(swlen); msg->body[2] = cpu_to_le32(kxfer.sw_id); msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); msg->body[4] = cpu_to_le32(buffer.phys); osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); if (status != -ETIMEDOUT) i2o_dma_free(&c->pdev->dev, &buffer); if (status != I2O_POST_WAIT_OK) { // it fails if you try and send frags out of order // and for some yet unknown reasons too osm_info("swdl failed, DetailedStatus = %d\n", status); return status; } return 0; }; static int i2o_cfg_swul(unsigned long arg) { struct i2o_sw_xfer kxfer; struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; unsigned char maxfrag = 0, curfrag = 1; struct i2o_dma buffer; struct i2o_message *msg; unsigned int status = 0, swlen = 0, fragsize = 8192; struct i2o_controller *c; int ret = 0; if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) return -EFAULT; if (get_user(swlen, kxfer.swlen) < 0) return -EFAULT; if (get_user(maxfrag, kxfer.maxfrag) < 0) return -EFAULT; if (get_user(curfrag, kxfer.curfrag) < 0) return -EFAULT; if (curfrag == maxfrag) fragsize = swlen - (maxfrag - 1) * 8192; if (!kxfer.buf) return -EFAULT; c = i2o_find_iop(kxfer.iop); if (!c) return -ENXIO; msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) { i2o_msg_nop(c, msg); return -ENOMEM; } msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); msg->u.head[1] = cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID); msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); msg->u.head[3] = cpu_to_le32(0); msg->body[0] = cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer. 
sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag); msg->body[1] = cpu_to_le32(swlen); msg->body[2] = cpu_to_le32(kxfer.sw_id); msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); msg->body[4] = cpu_to_le32(buffer.phys); osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); if (status != I2O_POST_WAIT_OK) { if (status != -ETIMEDOUT) i2o_dma_free(&c->pdev->dev, &buffer); osm_info("swul failed, DetailedStatus = %d\n", status); return status; } if (copy_to_user(kxfer.buf, buffer.virt, fragsize)) ret = -EFAULT; i2o_dma_free(&c->pdev->dev, &buffer); return ret; } static int i2o_cfg_swdel(unsigned long arg) { struct i2o_controller *c; struct i2o_sw_xfer kxfer; struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; struct i2o_message *msg; unsigned int swlen; int token; if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) return -EFAULT; if (get_user(swlen, kxfer.swlen) < 0) return -EFAULT; c = i2o_find_iop(kxfer.iop); if (!c) return -ENXIO; msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID); msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); msg->u.head[3] = cpu_to_le32(0); msg->body[0] = cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16); msg->body[1] = cpu_to_le32(swlen); msg->body[2] = cpu_to_le32(kxfer.sw_id); token = i2o_msg_post_wait(c, msg, 10); if (token != I2O_POST_WAIT_OK) { osm_info("swdel failed, DetailedStatus = %d\n", token); return -ETIMEDOUT; } return 0; }; static int i2o_cfg_validate(unsigned long arg) { int token; int iop = (int)arg; struct i2o_message *msg; struct i2o_controller *c; c = i2o_find_iop(iop); if (!c) return -ENXIO; msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = 
cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop); msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); msg->u.head[3] = cpu_to_le32(0); token = i2o_msg_post_wait(c, msg, 10); if (token != I2O_POST_WAIT_OK) { osm_info("Can't validate configuration, ErrorStatus = %d\n", token); return -ETIMEDOUT; } return 0; }; static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) { struct i2o_message *msg; struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; struct i2o_evt_id kdesc; struct i2o_controller *c; struct i2o_device *d; if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id))) return -EFAULT; /* IOP exists? */ c = i2o_find_iop(kdesc.iop); if (!c) return -ENXIO; /* Device exists? */ d = i2o_iop_find_device(c, kdesc.tid); if (!d) return -ENODEV; msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); msg->u.head[1] = cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid); msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data)); msg->body[0] = cpu_to_le32(kdesc.evt_mask); i2o_msg_post(c, msg); return 0; } static int i2o_cfg_evt_get(unsigned long arg, struct file *fp) { struct i2o_cfg_info *p = NULL; struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg; struct i2o_evt_get kget; unsigned long flags; for (p = open_files; p; p = p->next) if (p->q_id == (ulong) fp->private_data) break; if (!p->q_len) return -ENOENT; memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info)); MODINC(p->q_out, I2O_EVT_Q_LEN); spin_lock_irqsave(&i2o_config_lock, flags); p->q_len--; kget.pending = p->q_len; kget.lost = p->q_lost; spin_unlock_irqrestore(&i2o_config_lock, flags); if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get))) return -EFAULT; return 0; } #ifdef 
CONFIG_COMPAT static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long arg) { struct i2o_cmd_passthru32 __user *cmd; struct i2o_controller *c; u32 __user *user_msg; u32 *reply = NULL; u32 __user *user_reply = NULL; u32 size = 0; u32 reply_size = 0; u32 rcode = 0; struct i2o_dma sg_list[SG_TABLESIZE]; u32 sg_offset = 0; u32 sg_count = 0; u32 i = 0; u32 sg_index = 0; i2o_status_block *sb; struct i2o_message *msg; unsigned int iop; cmd = (struct i2o_cmd_passthru32 __user *)arg; if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg)) return -EFAULT; user_msg = compat_ptr(i); c = i2o_find_iop(iop); if (!c) { osm_debug("controller %d not found\n", iop); return -ENXIO; } sb = c->status_block.virt; if (get_user(size, &user_msg[0])) { osm_warn("unable to get size!\n"); return -EFAULT; } size = size >> 16; if (size > sb->inbound_frame_size) { osm_warn("size of message > inbound_frame_size"); return -EFAULT; } user_reply = &user_msg[size]; size <<= 2; // Convert to bytes msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); rcode = -EFAULT; /* Copy in the user's I2O command */ if (copy_from_user(msg, user_msg, size)) { osm_warn("unable to copy user message\n"); goto out; } i2o_dump_message(msg); if (get_user(reply_size, &user_reply[0]) < 0) goto out; reply_size >>= 16; reply_size <<= 2; rcode = -ENOMEM; reply = kzalloc(reply_size, GFP_KERNEL); if (!reply) { printk(KERN_WARNING "%s: Could not allocate reply buffer\n", c->name); goto out; } sg_offset = (msg->u.head[0] >> 4) & 0x0f; memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); if (sg_offset) { struct sg_simple_element *sg; if (sg_offset * 4 >= size) { rcode = -EFAULT; goto cleanup; } // TODO 64bit fix sg = (struct sg_simple_element *)((&msg->u.head[0]) + sg_offset); sg_count = (size - sg_offset * 4) / sizeof(struct sg_simple_element); if (sg_count > SG_TABLESIZE) { printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", c->name, sg_count); rcode = -EINVAL; goto 
cleanup; } for (i = 0; i < sg_count; i++) { int sg_size; struct i2o_dma *p; if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) { printk(KERN_DEBUG "%s:Bad SG element %d - not simple (%x)\n", c->name, i, sg[i].flag_count); rcode = -EINVAL; goto cleanup; } sg_size = sg[i].flag_count & 0xffffff; p = &(sg_list[sg_index]); /* Allocate memory for the transfer */ if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) { printk(KERN_DEBUG "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", c->name, sg_size, i, sg_count); rcode = -ENOMEM; goto sg_list_cleanup; } sg_index++; /* Copy in the user's SG buffer if necessary */ if (sg[i]. flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { // TODO 64bit fix if (copy_from_user (p->virt, (void __user *)(unsigned long)sg[i]. addr_bus, sg_size)) { printk(KERN_DEBUG "%s: Could not copy SG buf %d FROM user\n", c->name, i); rcode = -EFAULT; goto sg_list_cleanup; } } //TODO 64bit fix sg[i].addr_bus = (u32) p->phys; } } rcode = i2o_msg_post_wait(c, msg, 60); msg = NULL; if (rcode) { reply[4] = ((u32) rcode) << 24; goto sg_list_cleanup; } if (sg_offset) { u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE]; /* Copy back the Scatter Gather buffers back to user space */ u32 j; // TODO 64bit fix struct sg_simple_element *sg; int sg_size; // re-acquire the original message to handle correctly the sg copy operation memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); // get user msg size in u32s if (get_user(size, &user_msg[0])) { rcode = -EFAULT; goto sg_list_cleanup; } size = size >> 16; size *= 4; /* Copy in the user's I2O command */ if (copy_from_user(rmsg, user_msg, size)) { rcode = -EFAULT; goto sg_list_cleanup; } sg_count = (size - sg_offset * 4) / sizeof(struct sg_simple_element); // TODO 64bit fix sg = (struct sg_simple_element *)(rmsg + sg_offset); for (j = 0; j < sg_count; j++) { /* Copy out the SG list to user's buffer if necessary */ if (! (sg[j]. 
flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) { sg_size = sg[j].flag_count & 0xffffff; // TODO 64bit fix if (copy_to_user ((void __user *)(u64) sg[j].addr_bus, sg_list[j].virt, sg_size)) { printk(KERN_WARNING "%s: Could not copy %p TO user %x\n", c->name, sg_list[j].virt, sg[j].addr_bus); rcode = -EFAULT; goto sg_list_cleanup; } } } } sg_list_cleanup: /* Copy back the reply to user space */ if (reply_size) { // we wrote our own values for context - now restore the user supplied ones if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) { printk(KERN_WARNING "%s: Could not copy message context FROM user\n", c->name); rcode = -EFAULT; } if (copy_to_user(user_reply, reply, reply_size)) { printk(KERN_WARNING "%s: Could not copy reply TO user\n", c->name); rcode = -EFAULT; } } for (i = 0; i < sg_index; i++) i2o_dma_free(&c->pdev->dev, &sg_list[i]); cleanup: kfree(reply); out: if (msg) i2o_msg_nop(c, msg); return rcode; } static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) { int ret; mutex_lock(&i2o_cfg_mutex); switch (cmd) { case I2OGETIOPS: ret = i2o_cfg_ioctl(file, cmd, arg); break; case I2OPASSTHRU32: ret = i2o_cfg_passthru32(file, cmd, arg); break; default: ret = -ENOIOCTLCMD; break; } mutex_unlock(&i2o_cfg_mutex); return ret; } #endif #ifdef CONFIG_I2O_EXT_ADAPTEC static int i2o_cfg_passthru(unsigned long arg) { struct i2o_cmd_passthru __user *cmd = (struct i2o_cmd_passthru __user *)arg; struct i2o_controller *c; u32 __user *user_msg; u32 *reply = NULL; u32 __user *user_reply = NULL; u32 size = 0; u32 reply_size = 0; u32 rcode = 0; struct i2o_dma sg_list[SG_TABLESIZE]; u32 sg_offset = 0; u32 sg_count = 0; int sg_index = 0; u32 i = 0; i2o_status_block *sb; struct i2o_message *msg; unsigned int iop; if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) return -EFAULT; c = i2o_find_iop(iop); if (!c) { osm_warn("controller %d not found\n", iop); return -ENXIO; } sb = c->status_block.virt; if (get_user(size, 
&user_msg[0])) return -EFAULT; size = size >> 16; if (size > sb->inbound_frame_size) { osm_warn("size of message > inbound_frame_size"); return -EFAULT; } user_reply = &user_msg[size]; size <<= 2; // Convert to bytes msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); if (IS_ERR(msg)) return PTR_ERR(msg); rcode = -EFAULT; /* Copy in the user's I2O command */ if (copy_from_user(msg, user_msg, size)) goto out; if (get_user(reply_size, &user_reply[0]) < 0) goto out; reply_size >>= 16; reply_size <<= 2; reply = kzalloc(reply_size, GFP_KERNEL); if (!reply) { printk(KERN_WARNING "%s: Could not allocate reply buffer\n", c->name); rcode = -ENOMEM; goto out; } sg_offset = (msg->u.head[0] >> 4) & 0x0f; memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); if (sg_offset) { struct sg_simple_element *sg; struct i2o_dma *p; if (sg_offset * 4 >= size) { rcode = -EFAULT; goto cleanup; } // TODO 64bit fix sg = (struct sg_simple_element *)((&msg->u.head[0]) + sg_offset); sg_count = (size - sg_offset * 4) / sizeof(struct sg_simple_element); if (sg_count > SG_TABLESIZE) { printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", c->name, sg_count); rcode = -EINVAL; goto cleanup; } for (i = 0; i < sg_count; i++) { int sg_size; if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) { printk(KERN_DEBUG "%s:Bad SG element %d - not simple (%x)\n", c->name, i, sg[i].flag_count); rcode = -EINVAL; goto sg_list_cleanup; } sg_size = sg[i].flag_count & 0xffffff; p = &(sg_list[sg_index]); if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) { /* Allocate memory for the transfer */ printk(KERN_DEBUG "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", c->name, sg_size, i, sg_count); rcode = -ENOMEM; goto sg_list_cleanup; } sg_index++; /* Copy in the user's SG buffer if necessary */ if (sg[i]. 
flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { // TODO 64bit fix if (copy_from_user (p->virt, (void __user *)sg[i].addr_bus, sg_size)) { printk(KERN_DEBUG "%s: Could not copy SG buf %d FROM user\n", c->name, i); rcode = -EFAULT; goto sg_list_cleanup; } } sg[i].addr_bus = p->phys; } } rcode = i2o_msg_post_wait(c, msg, 60); msg = NULL; if (rcode) { reply[4] = ((u32) rcode) << 24; goto sg_list_cleanup; } if (sg_offset) { u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE]; /* Copy back the Scatter Gather buffers back to user space */ u32 j; // TODO 64bit fix struct sg_simple_element *sg; int sg_size; // re-acquire the original message to handle correctly the sg copy operation memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); // get user msg size in u32s if (get_user(size, &user_msg[0])) { rcode = -EFAULT; goto sg_list_cleanup; } size = size >> 16; size *= 4; /* Copy in the user's I2O command */ if (copy_from_user(rmsg, user_msg, size)) { rcode = -EFAULT; goto sg_list_cleanup; } sg_count = (size - sg_offset * 4) / sizeof(struct sg_simple_element); // TODO 64bit fix sg = (struct sg_simple_element *)(rmsg + sg_offset); for (j = 0; j < sg_count; j++) { /* Copy out the SG list to user's buffer if necessary */ if (! (sg[j]. 
flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) { sg_size = sg[j].flag_count & 0xffffff; // TODO 64bit fix if (copy_to_user ((void __user *)sg[j].addr_bus, sg_list[j].virt, sg_size)) { printk(KERN_WARNING "%s: Could not copy %p TO user %x\n", c->name, sg_list[j].virt, sg[j].addr_bus); rcode = -EFAULT; goto sg_list_cleanup; } } } } sg_list_cleanup: /* Copy back the reply to user space */ if (reply_size) { // we wrote our own values for context - now restore the user supplied ones if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) { printk(KERN_WARNING "%s: Could not copy message context FROM user\n", c->name); rcode = -EFAULT; } if (copy_to_user(user_reply, reply, reply_size)) { printk(KERN_WARNING "%s: Could not copy reply TO user\n", c->name); rcode = -EFAULT; } } for (i = 0; i < sg_index; i++) i2o_dma_free(&c->pdev->dev, &sg_list[i]); cleanup: kfree(reply); out: if (msg) i2o_msg_nop(c, msg); return rcode; } #endif /* * IOCTL Handler */ static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { int ret; mutex_lock(&i2o_cfg_mutex); switch (cmd) { case I2OGETIOPS: ret = i2o_cfg_getiops(arg); break; case I2OHRTGET: ret = i2o_cfg_gethrt(arg); break; case I2OLCTGET: ret = i2o_cfg_getlct(arg); break; case I2OPARMSET: ret = i2o_cfg_parms(arg, I2OPARMSET); break; case I2OPARMGET: ret = i2o_cfg_parms(arg, I2OPARMGET); break; case I2OSWDL: ret = i2o_cfg_swdl(arg); break; case I2OSWUL: ret = i2o_cfg_swul(arg); break; case I2OSWDEL: ret = i2o_cfg_swdel(arg); break; case I2OVALIDATE: ret = i2o_cfg_validate(arg); break; case I2OEVTREG: ret = i2o_cfg_evt_reg(arg, fp); break; case I2OEVTGET: ret = i2o_cfg_evt_get(arg, fp); break; #ifdef CONFIG_I2O_EXT_ADAPTEC case I2OPASSTHRU: ret = i2o_cfg_passthru(arg); break; #endif default: osm_debug("unknown ioctl called!\n"); ret = -EINVAL; } mutex_unlock(&i2o_cfg_mutex); return ret; } static int cfg_open(struct inode *inode, struct file *file) { struct i2o_cfg_info *tmp = kmalloc(sizeof(struct 
i2o_cfg_info), GFP_KERNEL); unsigned long flags; if (!tmp) return -ENOMEM; mutex_lock(&i2o_cfg_mutex); file->private_data = (void *)(i2o_cfg_info_id++); tmp->fp = file; tmp->fasync = NULL; tmp->q_id = (ulong) file->private_data; tmp->q_len = 0; tmp->q_in = 0; tmp->q_out = 0; tmp->q_lost = 0; tmp->next = open_files; spin_lock_irqsave(&i2o_config_lock, flags); open_files = tmp; spin_unlock_irqrestore(&i2o_config_lock, flags); mutex_unlock(&i2o_cfg_mutex); return 0; } static int cfg_fasync(int fd, struct file *fp, int on) { ulong id = (ulong) fp->private_data; struct i2o_cfg_info *p; int ret = -EBADF; mutex_lock(&i2o_cfg_mutex); for (p = open_files; p; p = p->next) if (p->q_id == id) break; if (p) ret = fasync_helper(fd, fp, on, &p->fasync); mutex_unlock(&i2o_cfg_mutex); return ret; } static int cfg_release(struct inode *inode, struct file *file) { ulong id = (ulong) file->private_data; struct i2o_cfg_info *p, **q; unsigned long flags; mutex_lock(&i2o_cfg_mutex); spin_lock_irqsave(&i2o_config_lock, flags); for (q = &open_files; (p = *q) != NULL; q = &p->next) { if (p->q_id == id) { *q = p->next; kfree(p); break; } } spin_unlock_irqrestore(&i2o_config_lock, flags); mutex_unlock(&i2o_cfg_mutex); return 0; } static const struct file_operations config_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = i2o_cfg_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = i2o_cfg_compat_ioctl, #endif .open = cfg_open, .release = cfg_release, .fasync = cfg_fasync, }; static struct miscdevice i2o_miscdev = { I2O_MINOR, "i2octl", &config_fops }; static int __init i2o_config_old_init(void) { spin_lock_init(&i2o_config_lock); if (misc_register(&i2o_miscdev) < 0) { osm_err("can't register device.\n"); return -EBUSY; } return 0; } static void i2o_config_old_exit(void) { misc_deregister(&i2o_miscdev); } MODULE_AUTHOR("Red Hat Software");
gpl-2.0
zarboz/android_kernel_htc_dlx
virt/fs/ocfs2/slot_map.c
8038
12491
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * slot_map.c * * * * Copyright (C) 2002, 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "dlmglue.h" #include "extent_map.h" #include "heartbeat.h" #include "inode.h" #include "slot_map.h" #include "super.h" #include "sysfile.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" struct ocfs2_slot { int sl_valid; unsigned int sl_node_num; }; struct ocfs2_slot_info { int si_extended; int si_slots_per_block; struct inode *si_inode; unsigned int si_blocks; struct buffer_head **si_bh; unsigned int si_num_slots; struct ocfs2_slot *si_slots; }; static int __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si, unsigned int node_num); static void ocfs2_invalidate_slot(struct ocfs2_slot_info *si, int slot_num) { BUG_ON((slot_num < 0) || (slot_num >= si->si_num_slots)); si->si_slots[slot_num].sl_valid = 0; } static void ocfs2_set_slot(struct ocfs2_slot_info *si, int slot_num, unsigned int node_num) { BUG_ON((slot_num < 0) || (slot_num >= si->si_num_slots)); si->si_slots[slot_num].sl_valid = 1; si->si_slots[slot_num].sl_node_num = node_num; } /* This version is for the extended slot map */ 
static void ocfs2_update_slot_info_extended(struct ocfs2_slot_info *si) { int b, i, slotno; struct ocfs2_slot_map_extended *se; slotno = 0; for (b = 0; b < si->si_blocks; b++) { se = (struct ocfs2_slot_map_extended *)si->si_bh[b]->b_data; for (i = 0; (i < si->si_slots_per_block) && (slotno < si->si_num_slots); i++, slotno++) { if (se->se_slots[i].es_valid) ocfs2_set_slot(si, slotno, le32_to_cpu(se->se_slots[i].es_node_num)); else ocfs2_invalidate_slot(si, slotno); } } } /* * Post the slot information on disk into our slot_info struct. * Must be protected by osb_lock. */ static void ocfs2_update_slot_info_old(struct ocfs2_slot_info *si) { int i; struct ocfs2_slot_map *sm; sm = (struct ocfs2_slot_map *)si->si_bh[0]->b_data; for (i = 0; i < si->si_num_slots; i++) { if (le16_to_cpu(sm->sm_slots[i]) == (u16)OCFS2_INVALID_SLOT) ocfs2_invalidate_slot(si, i); else ocfs2_set_slot(si, i, le16_to_cpu(sm->sm_slots[i])); } } static void ocfs2_update_slot_info(struct ocfs2_slot_info *si) { /* * The slot data will have been refreshed when ocfs2_super_lock * was taken. */ if (si->si_extended) ocfs2_update_slot_info_extended(si); else ocfs2_update_slot_info_old(si); } int ocfs2_refresh_slot_info(struct ocfs2_super *osb) { int ret; struct ocfs2_slot_info *si = osb->slot_info; if (si == NULL) return 0; BUG_ON(si->si_blocks == 0); BUG_ON(si->si_bh == NULL); trace_ocfs2_refresh_slot_info(si->si_blocks); /* * We pass -1 as blocknr because we expect all of si->si_bh to * be !NULL. Thus, ocfs2_read_blocks() will ignore blocknr. If * this is not true, the read of -1 (UINT64_MAX) will fail. */ ret = ocfs2_read_blocks(INODE_CACHE(si->si_inode), -1, si->si_blocks, si->si_bh, OCFS2_BH_IGNORE_CACHE, NULL); if (ret == 0) { spin_lock(&osb->osb_lock); ocfs2_update_slot_info(si); spin_unlock(&osb->osb_lock); } return ret; } /* post the our slot info stuff into it's destination bh and write it * out. 
*/ static void ocfs2_update_disk_slot_extended(struct ocfs2_slot_info *si, int slot_num, struct buffer_head **bh) { int blkind = slot_num / si->si_slots_per_block; int slotno = slot_num % si->si_slots_per_block; struct ocfs2_slot_map_extended *se; BUG_ON(blkind >= si->si_blocks); se = (struct ocfs2_slot_map_extended *)si->si_bh[blkind]->b_data; se->se_slots[slotno].es_valid = si->si_slots[slot_num].sl_valid; if (si->si_slots[slot_num].sl_valid) se->se_slots[slotno].es_node_num = cpu_to_le32(si->si_slots[slot_num].sl_node_num); *bh = si->si_bh[blkind]; } static void ocfs2_update_disk_slot_old(struct ocfs2_slot_info *si, int slot_num, struct buffer_head **bh) { int i; struct ocfs2_slot_map *sm; sm = (struct ocfs2_slot_map *)si->si_bh[0]->b_data; for (i = 0; i < si->si_num_slots; i++) { if (si->si_slots[i].sl_valid) sm->sm_slots[i] = cpu_to_le16(si->si_slots[i].sl_node_num); else sm->sm_slots[i] = cpu_to_le16(OCFS2_INVALID_SLOT); } *bh = si->si_bh[0]; } static int ocfs2_update_disk_slot(struct ocfs2_super *osb, struct ocfs2_slot_info *si, int slot_num) { int status; struct buffer_head *bh; spin_lock(&osb->osb_lock); if (si->si_extended) ocfs2_update_disk_slot_extended(si, slot_num, &bh); else ocfs2_update_disk_slot_old(si, slot_num, &bh); spin_unlock(&osb->osb_lock); status = ocfs2_write_block(osb, bh, INODE_CACHE(si->si_inode)); if (status < 0) mlog_errno(status); return status; } /* * Calculate how many bytes are needed by the slot map. Returns * an error if the slot map file is too small. */ static int ocfs2_slot_map_physical_size(struct ocfs2_super *osb, struct inode *inode, unsigned long long *bytes) { unsigned long long bytes_needed; if (ocfs2_uses_extended_slot_map(osb)) { bytes_needed = osb->max_slots * sizeof(struct ocfs2_extended_slot); } else { bytes_needed = osb->max_slots * sizeof(__le16); } if (bytes_needed > i_size_read(inode)) { mlog(ML_ERROR, "Slot map file is too small! 
(size %llu, needed %llu)\n", i_size_read(inode), bytes_needed); return -ENOSPC; } *bytes = bytes_needed; return 0; } /* try to find global node in the slot info. Returns -ENOENT * if nothing is found. */ static int __ocfs2_node_num_to_slot(struct ocfs2_slot_info *si, unsigned int node_num) { int i, ret = -ENOENT; for(i = 0; i < si->si_num_slots; i++) { if (si->si_slots[i].sl_valid && (node_num == si->si_slots[i].sl_node_num)) { ret = i; break; } } return ret; } static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si, int preferred) { int i, ret = -ENOSPC; if ((preferred >= 0) && (preferred < si->si_num_slots)) { if (!si->si_slots[preferred].sl_valid) { ret = preferred; goto out; } } for(i = 0; i < si->si_num_slots; i++) { if (!si->si_slots[i].sl_valid) { ret = i; break; } } out: return ret; } int ocfs2_node_num_to_slot(struct ocfs2_super *osb, unsigned int node_num) { int slot; struct ocfs2_slot_info *si = osb->slot_info; spin_lock(&osb->osb_lock); slot = __ocfs2_node_num_to_slot(si, node_num); spin_unlock(&osb->osb_lock); return slot; } int ocfs2_slot_to_node_num_locked(struct ocfs2_super *osb, int slot_num, unsigned int *node_num) { struct ocfs2_slot_info *si = osb->slot_info; assert_spin_locked(&osb->osb_lock); BUG_ON(slot_num < 0); BUG_ON(slot_num > osb->max_slots); if (!si->si_slots[slot_num].sl_valid) return -ENOENT; *node_num = si->si_slots[slot_num].sl_node_num; return 0; } static void __ocfs2_free_slot_info(struct ocfs2_slot_info *si) { unsigned int i; if (si == NULL) return; if (si->si_inode) iput(si->si_inode); if (si->si_bh) { for (i = 0; i < si->si_blocks; i++) { if (si->si_bh[i]) { brelse(si->si_bh[i]); si->si_bh[i] = NULL; } } kfree(si->si_bh); } kfree(si); } int ocfs2_clear_slot(struct ocfs2_super *osb, int slot_num) { struct ocfs2_slot_info *si = osb->slot_info; if (si == NULL) return 0; spin_lock(&osb->osb_lock); ocfs2_invalidate_slot(si, slot_num); spin_unlock(&osb->osb_lock); return ocfs2_update_disk_slot(osb, osb->slot_info, slot_num); } 
static int ocfs2_map_slot_buffers(struct ocfs2_super *osb, struct ocfs2_slot_info *si) { int status = 0; u64 blkno; unsigned long long blocks, bytes = 0; unsigned int i; struct buffer_head *bh; status = ocfs2_slot_map_physical_size(osb, si->si_inode, &bytes); if (status) goto bail; blocks = ocfs2_blocks_for_bytes(si->si_inode->i_sb, bytes); BUG_ON(blocks > UINT_MAX); si->si_blocks = blocks; if (!si->si_blocks) goto bail; if (si->si_extended) si->si_slots_per_block = (osb->sb->s_blocksize / sizeof(struct ocfs2_extended_slot)); else si->si_slots_per_block = osb->sb->s_blocksize / sizeof(__le16); /* The size checks above should ensure this */ BUG_ON((osb->max_slots / si->si_slots_per_block) > blocks); trace_ocfs2_map_slot_buffers(bytes, si->si_blocks); si->si_bh = kzalloc(sizeof(struct buffer_head *) * si->si_blocks, GFP_KERNEL); if (!si->si_bh) { status = -ENOMEM; mlog_errno(status); goto bail; } for (i = 0; i < si->si_blocks; i++) { status = ocfs2_extent_map_get_blocks(si->si_inode, i, &blkno, NULL, NULL); if (status < 0) { mlog_errno(status); goto bail; } trace_ocfs2_map_slot_buffers_block((unsigned long long)blkno, i); bh = NULL; /* Acquire a fresh bh */ status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno, 1, &bh, OCFS2_BH_IGNORE_CACHE, NULL); if (status < 0) { mlog_errno(status); goto bail; } si->si_bh[i] = bh; } bail: return status; } int ocfs2_init_slot_info(struct ocfs2_super *osb) { int status; struct inode *inode = NULL; struct ocfs2_slot_info *si; si = kzalloc(sizeof(struct ocfs2_slot_info) + (sizeof(struct ocfs2_slot) * osb->max_slots), GFP_KERNEL); if (!si) { status = -ENOMEM; mlog_errno(status); goto bail; } si->si_extended = ocfs2_uses_extended_slot_map(osb); si->si_num_slots = osb->max_slots; si->si_slots = (struct ocfs2_slot *)((char *)si + sizeof(struct ocfs2_slot_info)); inode = ocfs2_get_system_file_inode(osb, SLOT_MAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!inode) { status = -EINVAL; mlog_errno(status); goto bail; } si->si_inode = inode; 
status = ocfs2_map_slot_buffers(osb, si); if (status < 0) { mlog_errno(status); goto bail; } osb->slot_info = (struct ocfs2_slot_info *)si; bail: if (status < 0 && si) __ocfs2_free_slot_info(si); return status; } void ocfs2_free_slot_info(struct ocfs2_super *osb) { struct ocfs2_slot_info *si = osb->slot_info; osb->slot_info = NULL; __ocfs2_free_slot_info(si); } int ocfs2_find_slot(struct ocfs2_super *osb) { int status; int slot; struct ocfs2_slot_info *si; si = osb->slot_info; spin_lock(&osb->osb_lock); ocfs2_update_slot_info(si); /* search for ourselves first and take the slot if it already * exists. Perhaps we need to mark this in a variable for our * own journal recovery? Possibly not, though we certainly * need to warn to the user */ slot = __ocfs2_node_num_to_slot(si, osb->node_num); if (slot < 0) { /* if no slot yet, then just take 1st available * one. */ slot = __ocfs2_find_empty_slot(si, osb->preferred_slot); if (slot < 0) { spin_unlock(&osb->osb_lock); mlog(ML_ERROR, "no free slots available!\n"); status = -EINVAL; goto bail; } } else printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already " "allocated to this node!\n", slot, osb->dev_str); ocfs2_set_slot(si, slot, osb->node_num); osb->slot_num = slot; spin_unlock(&osb->osb_lock); trace_ocfs2_find_slot(osb->slot_num); status = ocfs2_update_disk_slot(osb, si, osb->slot_num); if (status < 0) mlog_errno(status); bail: return status; } void ocfs2_put_slot(struct ocfs2_super *osb) { int status, slot_num; struct ocfs2_slot_info *si = osb->slot_info; if (!si) return; spin_lock(&osb->osb_lock); ocfs2_update_slot_info(si); slot_num = osb->slot_num; ocfs2_invalidate_slot(si, osb->slot_num); osb->slot_num = OCFS2_INVALID_SLOT; spin_unlock(&osb->osb_lock); status = ocfs2_update_disk_slot(osb, si, slot_num); if (status < 0) { mlog_errno(status); goto bail; } bail: ocfs2_free_slot_info(osb); }
gpl-2.0
Slayjay78/android_kernel_lge_hammerhead
arch/arm/mach-pxa/pxa930.c
8294
5198
/* * linux/arch/arm/mach-pxa/pxa930.c * * Code specific to PXA930 * * Copyright (C) 2007-2008 Marvell Internation Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/dma-mapping.h> #include <mach/pxa930.h> static struct mfp_addr_map pxa930_mfp_addr_map[] __initdata = { MFP_ADDR(GPIO0, 0x02e0), MFP_ADDR(GPIO1, 0x02dc), MFP_ADDR(GPIO2, 0x02e8), MFP_ADDR(GPIO3, 0x02d8), MFP_ADDR(GPIO4, 0x02e4), MFP_ADDR(GPIO5, 0x02ec), MFP_ADDR(GPIO6, 0x02f8), MFP_ADDR(GPIO7, 0x02fc), MFP_ADDR(GPIO8, 0x0300), MFP_ADDR(GPIO9, 0x02d4), MFP_ADDR(GPIO10, 0x02f4), MFP_ADDR(GPIO11, 0x02f0), MFP_ADDR(GPIO12, 0x0304), MFP_ADDR(GPIO13, 0x0310), MFP_ADDR(GPIO14, 0x0308), MFP_ADDR(GPIO15, 0x030c), MFP_ADDR(GPIO16, 0x04e8), MFP_ADDR(GPIO17, 0x04f4), MFP_ADDR(GPIO18, 0x04f8), MFP_ADDR(GPIO19, 0x04fc), MFP_ADDR(GPIO20, 0x0518), MFP_ADDR(GPIO21, 0x051c), MFP_ADDR(GPIO22, 0x04ec), MFP_ADDR(GPIO23, 0x0500), MFP_ADDR(GPIO24, 0x04f0), MFP_ADDR(GPIO25, 0x0504), MFP_ADDR(GPIO26, 0x0510), MFP_ADDR(GPIO27, 0x0514), MFP_ADDR(GPIO28, 0x0520), MFP_ADDR(GPIO29, 0x0600), MFP_ADDR(GPIO30, 0x0618), MFP_ADDR(GPIO31, 0x0610), MFP_ADDR(GPIO32, 0x060c), MFP_ADDR(GPIO33, 0x061c), MFP_ADDR(GPIO34, 0x0620), MFP_ADDR(GPIO35, 0x0628), MFP_ADDR(GPIO36, 0x062c), MFP_ADDR(GPIO37, 0x0630), MFP_ADDR(GPIO38, 0x0634), MFP_ADDR(GPIO39, 0x0638), MFP_ADDR(GPIO40, 0x063c), MFP_ADDR(GPIO41, 0x0614), MFP_ADDR(GPIO42, 0x0624), MFP_ADDR(GPIO43, 0x0608), MFP_ADDR(GPIO44, 0x0604), MFP_ADDR(GPIO45, 0x050c), MFP_ADDR(GPIO46, 0x0508), MFP_ADDR(GPIO47, 0x02bc), MFP_ADDR(GPIO48, 0x02b4), MFP_ADDR(GPIO49, 0x02b8), MFP_ADDR(GPIO50, 0x02c8), MFP_ADDR(GPIO51, 0x02c0), MFP_ADDR(GPIO52, 0x02c4), MFP_ADDR(GPIO53, 0x02d0), MFP_ADDR(GPIO54, 0x02cc), MFP_ADDR(GPIO55, 0x029c), 
MFP_ADDR(GPIO56, 0x02a0), MFP_ADDR(GPIO57, 0x0294), MFP_ADDR(GPIO58, 0x0298), MFP_ADDR(GPIO59, 0x02a4), MFP_ADDR(GPIO60, 0x02a8), MFP_ADDR(GPIO61, 0x02b0), MFP_ADDR(GPIO62, 0x02ac), MFP_ADDR(GPIO63, 0x0640), MFP_ADDR(GPIO64, 0x065c), MFP_ADDR(GPIO65, 0x0648), MFP_ADDR(GPIO66, 0x0644), MFP_ADDR(GPIO67, 0x0674), MFP_ADDR(GPIO68, 0x0658), MFP_ADDR(GPIO69, 0x0654), MFP_ADDR(GPIO70, 0x0660), MFP_ADDR(GPIO71, 0x0668), MFP_ADDR(GPIO72, 0x0664), MFP_ADDR(GPIO73, 0x0650), MFP_ADDR(GPIO74, 0x066c), MFP_ADDR(GPIO75, 0x064c), MFP_ADDR(GPIO76, 0x0670), MFP_ADDR(GPIO77, 0x0678), MFP_ADDR(GPIO78, 0x067c), MFP_ADDR(GPIO79, 0x0694), MFP_ADDR(GPIO80, 0x069c), MFP_ADDR(GPIO81, 0x06a0), MFP_ADDR(GPIO82, 0x06a4), MFP_ADDR(GPIO83, 0x0698), MFP_ADDR(GPIO84, 0x06bc), MFP_ADDR(GPIO85, 0x06b4), MFP_ADDR(GPIO86, 0x06b0), MFP_ADDR(GPIO87, 0x06c0), MFP_ADDR(GPIO88, 0x06c4), MFP_ADDR(GPIO89, 0x06ac), MFP_ADDR(GPIO90, 0x0680), MFP_ADDR(GPIO91, 0x0684), MFP_ADDR(GPIO92, 0x0688), MFP_ADDR(GPIO93, 0x0690), MFP_ADDR(GPIO94, 0x068c), MFP_ADDR(GPIO95, 0x06a8), MFP_ADDR(GPIO96, 0x06b8), MFP_ADDR(GPIO97, 0x0410), MFP_ADDR(GPIO98, 0x0418), MFP_ADDR(GPIO99, 0x041c), MFP_ADDR(GPIO100, 0x0414), MFP_ADDR(GPIO101, 0x0408), MFP_ADDR(GPIO102, 0x0324), MFP_ADDR(GPIO103, 0x040c), MFP_ADDR(GPIO104, 0x0400), MFP_ADDR(GPIO105, 0x0328), MFP_ADDR(GPIO106, 0x0404), MFP_ADDR(nXCVREN, 0x0204), MFP_ADDR(DF_CLE_nOE, 0x020c), MFP_ADDR(DF_nADV1_ALE, 0x0218), MFP_ADDR(DF_SCLK_E, 0x0214), MFP_ADDR(DF_SCLK_S, 0x0210), MFP_ADDR(nBE0, 0x021c), MFP_ADDR(nBE1, 0x0220), MFP_ADDR(DF_nADV2_ALE, 0x0224), MFP_ADDR(DF_INT_RnB, 0x0228), MFP_ADDR(DF_nCS0, 0x022c), MFP_ADDR(DF_nCS1, 0x0230), MFP_ADDR(nLUA, 0x0254), MFP_ADDR(nLLA, 0x0258), MFP_ADDR(DF_nWE, 0x0234), MFP_ADDR(DF_nRE_nOE, 0x0238), MFP_ADDR(DF_ADDR0, 0x024c), MFP_ADDR(DF_ADDR1, 0x0250), MFP_ADDR(DF_ADDR2, 0x025c), MFP_ADDR(DF_ADDR3, 0x0260), MFP_ADDR(DF_IO0, 0x023c), MFP_ADDR(DF_IO1, 0x0240), MFP_ADDR(DF_IO2, 0x0244), MFP_ADDR(DF_IO3, 0x0248), MFP_ADDR(DF_IO4, 0x0264), 
MFP_ADDR(DF_IO5, 0x0268), MFP_ADDR(DF_IO6, 0x026c), MFP_ADDR(DF_IO7, 0x0270), MFP_ADDR(DF_IO8, 0x0274), MFP_ADDR(DF_IO9, 0x0278), MFP_ADDR(DF_IO10, 0x027c), MFP_ADDR(DF_IO11, 0x0280), MFP_ADDR(DF_IO12, 0x0284), MFP_ADDR(DF_IO13, 0x0288), MFP_ADDR(DF_IO14, 0x028c), MFP_ADDR(DF_IO15, 0x0290), MFP_ADDR(GSIM_UIO, 0x0314), MFP_ADDR(GSIM_UCLK, 0x0318), MFP_ADDR(GSIM_UDET, 0x031c), MFP_ADDR(GSIM_nURST, 0x0320), MFP_ADDR(PMIC_INT, 0x06c8), MFP_ADDR(RDY, 0x0200), MFP_ADDR_END, }; static struct mfp_addr_map pxa935_mfp_addr_map[] __initdata = { MFP_ADDR(GPIO159, 0x0524), MFP_ADDR(GPIO163, 0x0534), MFP_ADDR(GPIO167, 0x0544), MFP_ADDR(GPIO168, 0x0548), MFP_ADDR(GPIO169, 0x054c), MFP_ADDR(GPIO170, 0x0550), MFP_ADDR(GPIO171, 0x0554), MFP_ADDR(GPIO172, 0x0558), MFP_ADDR(GPIO173, 0x055c), MFP_ADDR_END, }; static int __init pxa930_init(void) { if (cpu_is_pxa93x()) { mfp_init_base(io_p2v(MFPR_BASE)); mfp_init_addr(pxa930_mfp_addr_map); } if (cpu_is_pxa935()) mfp_init_addr(pxa935_mfp_addr_map); return 0; } core_initcall(pxa930_init);
gpl-2.0
maikelwever/android_kernel_htc_msm8660-caf
Documentation/connector/cn_test.c
9062
4694
/* * cn_test.c * * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) "cn_test: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/connector.h> static struct cb_id cn_test_id = { CN_NETLINK_USERS + 3, 0x456 }; static char cn_test_name[] = "cn_test"; static struct sock *nls; static struct timer_list cn_test_timer; static void cn_test_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { pr_info("%s: %lu: idx=%x, val=%x, seq=%u, ack=%u, len=%d: %s.\n", __func__, jiffies, msg->id.idx, msg->id.val, msg->seq, msg->ack, msg->len, msg->len ? 
(char *)msg->data : ""); } /* * Do not remove this function even if no one is using it as * this is an example of how to get notifications about new * connector user registration */ #if 0 static int cn_test_want_notify(void) { struct cn_ctl_msg *ctl; struct cn_notify_req *req; struct cn_msg *msg = NULL; int size, size0; struct sk_buff *skb; struct nlmsghdr *nlh; u32 group = 1; size0 = sizeof(*msg) + sizeof(*ctl) + 3 * sizeof(*req); size = NLMSG_SPACE(size0); skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { pr_err("failed to allocate new skb with size=%u\n", size); return -ENOMEM; } nlh = NLMSG_PUT(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh)); msg = (struct cn_msg *)NLMSG_DATA(nlh); memset(msg, 0, size0); msg->id.idx = -1; msg->id.val = -1; msg->seq = 0x123; msg->ack = 0x345; msg->len = size0 - sizeof(*msg); ctl = (struct cn_ctl_msg *)(msg + 1); ctl->idx_notify_num = 1; ctl->val_notify_num = 2; ctl->group = group; ctl->len = msg->len - sizeof(*ctl); req = (struct cn_notify_req *)(ctl + 1); /* * Idx. */ req->first = cn_test_id.idx; req->range = 10; /* * Val 0. */ req++; req->first = cn_test_id.val; req->range = 10; /* * Val 1. 
*/ req++; req->first = cn_test_id.val + 20; req->range = 10; NETLINK_CB(skb).dst_group = ctl->group; //netlink_broadcast(nls, skb, 0, ctl->group, GFP_ATOMIC); netlink_unicast(nls, skb, 0, 0); pr_info("request was sent: group=0x%x\n", ctl->group); return 0; nlmsg_failure: pr_err("failed to send %u.%u\n", msg->seq, msg->ack); kfree_skb(skb); return -EINVAL; } #endif static u32 cn_test_timer_counter; static void cn_test_timer_func(unsigned long __data) { struct cn_msg *m; char data[32]; pr_debug("%s: timer fired with data %lu\n", __func__, __data); m = kzalloc(sizeof(*m) + sizeof(data), GFP_ATOMIC); if (m) { memcpy(&m->id, &cn_test_id, sizeof(m->id)); m->seq = cn_test_timer_counter; m->len = sizeof(data); m->len = scnprintf(data, sizeof(data), "counter = %u", cn_test_timer_counter) + 1; memcpy(m + 1, data, m->len); cn_netlink_send(m, 0, GFP_ATOMIC); kfree(m); } cn_test_timer_counter++; mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000)); } static int cn_test_init(void) { int err; err = cn_add_callback(&cn_test_id, cn_test_name, cn_test_callback); if (err) goto err_out; cn_test_id.val++; err = cn_add_callback(&cn_test_id, cn_test_name, cn_test_callback); if (err) { cn_del_callback(&cn_test_id); goto err_out; } setup_timer(&cn_test_timer, cn_test_timer_func, 0); mod_timer(&cn_test_timer, jiffies + msecs_to_jiffies(1000)); pr_info("initialized with id={%u.%u}\n", cn_test_id.idx, cn_test_id.val); return 0; err_out: if (nls && nls->sk_socket) sock_release(nls->sk_socket); return err; } static void cn_test_fini(void) { del_timer_sync(&cn_test_timer); cn_del_callback(&cn_test_id); cn_test_id.val--; cn_del_callback(&cn_test_id); if (nls && nls->sk_socket) sock_release(nls->sk_socket); } module_init(cn_test_init); module_exit(cn_test_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Connector's test module");
gpl-2.0
manishj-patel/netbook_kernel_3.4.5_plus
net/atm/raw.c
11878
1887
/* net/atm/raw.c - Raw AAL0 and AAL5 transports */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/module.h> #include <linux/atmdev.h> #include <linux/capability.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/mm.h> #include <linux/slab.h> #include "common.h" #include "protocols.h" /* * SKB == NULL indicates that the link is being closed */ static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb) { if (skb) { struct sock *sk = sk_atm(vcc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); } } static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb) { struct sock *sk = sk_atm(vcc); pr_debug("(%d) %d -= %d\n", vcc->vci, sk_wmem_alloc_get(sk), skb->truesize); atomic_sub(skb->truesize, &sk->sk_wmem_alloc); dev_kfree_skb_any(skb); sk->sk_write_space(sk); } static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb) { /* * Note that if vpi/vci are _ANY or _UNSPEC the below will * still work */ if (!capable(CAP_NET_ADMIN) && (((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) != ((vcc->vpi << ATM_HDR_VPI_SHIFT) | (vcc->vci << ATM_HDR_VCI_SHIFT))) { kfree_skb(skb); return -EADDRNOTAVAIL; } return vcc->dev->ops->send(vcc, skb); } int atm_init_aal0(struct atm_vcc *vcc) { vcc->push = atm_push_raw; vcc->pop = atm_pop_raw; vcc->push_oam = NULL; vcc->send = atm_send_aal0; return 0; } int atm_init_aal34(struct atm_vcc *vcc) { vcc->push = atm_push_raw; vcc->pop = atm_pop_raw; vcc->push_oam = NULL; vcc->send = vcc->dev->ops->send; return 0; } int atm_init_aal5(struct atm_vcc *vcc) { vcc->push = atm_push_raw; vcc->pop = atm_pop_raw; vcc->push_oam = NULL; vcc->send = vcc->dev->ops->send; return 0; } EXPORT_SYMBOL(atm_init_aal5);
gpl-2.0
giantdisaster/btrfs
drivers/tty/ipwireless/main.c
12646
8648
/*
 * IPWireless 3G PCMCIA Network Driver
 *
 * Original code
 * by Stephen Blackheath <stephen@blacksapphire.com>,
 *    Ben Martel <benm@symmetric.co.nz>
 *
 * Copyrighted as follows:
 * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
 *
 * Various driver changes and rewrites, port to new kernels
 * Copyright (C) 2006-2007 Jiri Kosina
 *
 * Misc code cleanups and updates
 * Copyright (C) 2007 David Sterba
 */
#include "hardware.h"
#include "network.h"
#include "main.h"
#include "tty.h"

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <pcmcia/cisreg.h>
#include <pcmcia/device_id.h>
#include <pcmcia/ss.h>
#include <pcmcia/ds.h>

/* PCMCIA manufacturer/card IDs this driver binds to. */
static const struct pcmcia_device_id ipw_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100),
	PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, ipw_ids);

static void ipwireless_detach(struct pcmcia_device *link);

/*
 * Module params
 */
/* Debug mode: more verbose, print sent/recv bytes */
int ipwireless_debug;
int ipwireless_loopback;
int ipwireless_out_queue = 10;

module_param_named(debug, ipwireless_debug, int, 0);
module_param_named(loopback, ipwireless_loopback, int, 0);
module_param_named(out_queue, ipwireless_out_queue, int, 0);
MODULE_PARM_DESC(debug, "switch on debug messages [0]");
MODULE_PARM_DESC(loopback, "debug: enable ras_raw channel [0]");
MODULE_PARM_DESC(out_queue, "debug: set size of outgoing PPP queue [10]");

/* Executes in process context. */
static void signalled_reboot_work(struct work_struct *work_reboot)
{
	struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev,
			work_reboot);
	struct pcmcia_device *link = ipw->link;

	pcmcia_reset_card(link->socket);
}

/*
 * Reboot-request callback handed to the hardware layer; defers the
 * actual card reset to the work item above.
 */
static void signalled_reboot_callback(void *callback_data)
{
	struct ipw_dev *ipw = (struct ipw_dev *) callback_data;

	/* Delegate to process context. */
	schedule_work(&ipw->work_reboot);
}

/*
 * pcmcia_loop_config() callback: claim the I/O port range and the two
 * memory windows (common memory in resource[2], attribute memory in
 * resource[3]).  On failure, everything acquired so far is rolled back
 * in reverse order through the exit labels.
 */
static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
{
	struct ipw_dev *ipw = priv_data;
	int ret;

	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;

	/* 0x40 causes it to generate level mode interrupts. */
	/* 0x04 enables IREQ pin. */
	p_dev->config_index |= 0x44;
	p_dev->io_lines = 16;
	ret = pcmcia_request_io(p_dev);
	if (ret)
		return ret;

	if (!request_region(p_dev->resource[0]->start,
			    resource_size(p_dev->resource[0]),
			    IPWIRELESS_PCCARD_NAME)) {
		ret = -EBUSY;
		goto exit;
	}

	p_dev->resource[2]->flags |=
		WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;

	ret = pcmcia_request_window(p_dev, p_dev->resource[2], 0);
	if (ret != 0)
		goto exit1;

	ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
	if (ret != 0)
		goto exit1;

	/* A 0x100-byte common-memory window identifies a V2/V3 card. */
	ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;

	/* NOTE(review): ioremap is done before request_mem_region here
	 * (and again for attribute memory below) — confirm this ordering
	 * is intentional and that an ioremap failure cannot slip through
	 * unchecked. */
	ipw->common_memory = ioremap(p_dev->resource[2]->start,
				resource_size(p_dev->resource[2]));
	if (!request_mem_region(p_dev->resource[2]->start,
				resource_size(p_dev->resource[2]),
				IPWIRELESS_PCCARD_NAME)) {
		ret = -EBUSY;
		goto exit2;
	}

	p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
					WIN_ENABLE;
	p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
	ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
	if (ret != 0)
		goto exit3;

	ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
	if (ret != 0)
		goto exit3;

	ipw->attr_memory = ioremap(p_dev->resource[3]->start,
				resource_size(p_dev->resource[3]));
	if (!request_mem_region(p_dev->resource[3]->start,
				resource_size(p_dev->resource[3]),
				IPWIRELESS_PCCARD_NAME)) {
		ret = -EBUSY;
		goto exit4;
	}

	return 0;

exit4:
	iounmap(ipw->attr_memory);
exit3:
	release_mem_region(p_dev->resource[2]->start,
			resource_size(p_dev->resource[2]));
exit2:
	iounmap(ipw->common_memory);
exit1:
	release_region(p_dev->resource[0]->start,
		       resource_size(p_dev->resource[0]));
exit:
	pcmcia_disable_device(p_dev);
	return ret;
}

/*
 * Configure the card: run the probe over candidate configurations,
 * initialise the hardware layer, request the IRQ, create the network
 * and tty layers, and finally enable the device (which enables
 * interrupts).
 */
static int config_ipwireless(struct ipw_dev *ipw)
{
	struct pcmcia_device *link = ipw->link;
	int ret = 0;

	ipw->is_v2_card = 0;

	link->config_flags |= CONF_AUTO_SET_IO | CONF_AUTO_SET_IOMEM |
		CONF_ENABLE_IRQ;
	ret = pcmcia_loop_config(link, ipwireless_probe, ipw);
	if (ret != 0)
		return ret;

	INIT_WORK(&ipw->work_reboot, signalled_reboot_work);

	ipwireless_init_hardware_v1(ipw->hardware, link->resource[0]->start,
				    ipw->attr_memory, ipw->common_memory,
				    ipw->is_v2_card, signalled_reboot_callback,
				    ipw);

	ret = pcmcia_request_irq(link, ipwireless_interrupt);
	if (ret != 0)
		goto exit;

	printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n",
			ipw->is_v2_card ? "V2/V3" : "V1");
	printk(KERN_INFO IPWIRELESS_PCCARD_NAME
		": I/O ports %pR, irq %d\n", link->resource[0],
			(unsigned int) link->irq);
	if (ipw->attr_memory && ipw->common_memory)
		printk(KERN_INFO IPWIRELESS_PCCARD_NAME
			": attr memory %pR, common memory %pR\n",
			link->resource[3],
			link->resource[2]);

	ipw->network = ipwireless_network_create(ipw->hardware);
	if (!ipw->network)
		goto exit;

	ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network);
	if (!ipw->tty)
		goto exit;

	ipwireless_init_hardware_v2_v3(ipw->hardware);

	/*
	 * Do the RequestConfiguration last, because it enables interrupts.
	 * Then we don't get any interrupts before we're ready for them.
	 */
	ret = pcmcia_enable_device(link);
	if (ret != 0)
		goto exit;

	return 0;

exit:
	/* NOTE(review): this path always reports -1, even when 'ret'
	 * holds a specific error code — confirm callers don't need the
	 * real errno. */
	if (ipw->common_memory) {
		release_mem_region(link->resource[2]->start,
				resource_size(link->resource[2]));
		iounmap(ipw->common_memory);
	}
	if (ipw->attr_memory) {
		release_mem_region(link->resource[3]->start,
				resource_size(link->resource[3]));
		iounmap(ipw->attr_memory);
	}
	pcmcia_disable_device(link);
	return -1;
}

/* Undo everything ipwireless_probe() acquired for this device. */
static void release_ipwireless(struct ipw_dev *ipw)
{
	release_region(ipw->link->resource[0]->start,
			resource_size(ipw->link->resource[0]));
	if (ipw->common_memory) {
		release_mem_region(ipw->link->resource[2]->start,
				resource_size(ipw->link->resource[2]));
		iounmap(ipw->common_memory);
	}
	if (ipw->attr_memory) {
		release_mem_region(ipw->link->resource[3]->start,
				resource_size(ipw->link->resource[3]));
		iounmap(ipw->attr_memory);
	}
	pcmcia_disable_device(ipw->link);
}

/*
 * ipwireless_attach() creates an "instance" of the driver, allocating
 * local data structures for one device (one interface). The device
 * is registered with Card Services.
 *
 * The pcmcia_device structure is initialized, but we don't actually
 * configure the card at this point -- we wait until we receive a
 * card insertion event.
 */
static int ipwireless_attach(struct pcmcia_device *link)
{
	struct ipw_dev *ipw;
	int ret;

	ipw = kzalloc(sizeof(struct ipw_dev), GFP_KERNEL);
	if (!ipw)
		return -ENOMEM;

	ipw->link = link;
	link->priv = ipw;

	ipw->hardware = ipwireless_hardware_create();
	if (!ipw->hardware) {
		kfree(ipw);
		return -ENOMEM;
	}
	/* RegisterClient will call config_ipwireless */

	ret = config_ipwireless(ipw);

	if (ret != 0) {
		/* NOTE(review): ipwireless_detach() calls
		 * release_ipwireless() even though config_ipwireless()
		 * already released its resources on failure — verify this
		 * cannot double-release the I/O region. */
		ipwireless_detach(link);
		return ret;
	}

	return 0;
}

/*
 * This deletes a driver "instance". The device is de-registered with
 * Card Services. If it has been released, all local data structures
 * are freed. Otherwise, the structures will be freed when the device
 * is released.
 */
static void ipwireless_detach(struct pcmcia_device *link)
{
	struct ipw_dev *ipw = link->priv;

	release_ipwireless(ipw);

	if (ipw->tty != NULL)
		ipwireless_tty_free(ipw->tty);
	if (ipw->network != NULL)
		ipwireless_network_free(ipw->network);
	if (ipw->hardware != NULL)
		ipwireless_hardware_free(ipw->hardware);
	kfree(ipw);
}

static struct pcmcia_driver me = {
	.owner		= THIS_MODULE,
	.probe          = ipwireless_attach,
	.remove         = ipwireless_detach,
	.name		= IPWIRELESS_PCCARD_NAME,
	.id_table       = ipw_ids
};

/*
 * Module insertion : initialisation of the module.
 * Register the card with cardmgr...
 */
static int __init init_ipwireless(void)
{
	int ret;

	ret = ipwireless_tty_init();
	if (ret != 0)
		return ret;

	ret = pcmcia_register_driver(&me);
	if (ret != 0)
		/* Registration failed: tear the tty layer back down. */
		ipwireless_tty_release();

	return ret;
}

/*
 * Module removal
 */
static void __exit exit_ipwireless(void)
{
	pcmcia_unregister_driver(&me);
	ipwireless_tty_release();
}

module_init(init_ipwireless);
module_exit(exit_ipwireless);

MODULE_AUTHOR(IPWIRELESS_PCMCIA_AUTHOR);
MODULE_DESCRIPTION(IPWIRELESS_PCCARD_NAME " " IPWIRELESS_PCMCIA_VERSION);
MODULE_LICENSE("GPL");
gpl-2.0
jokerfr9/DragonsKernel_Kylessopen
arch/x86/kernel/audit_64.c
13158
1870
#include <linux/init.h> #include <linux/types.h> #include <linux/audit.h> #include <asm/unistd.h> static unsigned dir_class[] = { #include <asm-generic/audit_dir_write.h> ~0U }; static unsigned read_class[] = { #include <asm-generic/audit_read.h> ~0U }; static unsigned write_class[] = { #include <asm-generic/audit_write.h> ~0U }; static unsigned chattr_class[] = { #include <asm-generic/audit_change_attr.h> ~0U }; static unsigned signal_class[] = { #include <asm-generic/audit_signal.h> ~0U }; int audit_classify_arch(int arch) { #ifdef CONFIG_IA32_EMULATION if (arch == AUDIT_ARCH_I386) return 1; #endif return 0; } int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_IA32_EMULATION extern int ia32_classify_syscall(unsigned); if (abi == AUDIT_ARCH_I386) return ia32_classify_syscall(syscall); #endif switch(syscall) { case __NR_open: return 2; case __NR_openat: return 3; case __NR_execve: return 5; default: return 0; } } static int __init audit_classes_init(void) { #ifdef CONFIG_IA32_EMULATION extern __u32 ia32_dir_class[]; extern __u32 ia32_write_class[]; extern __u32 ia32_read_class[]; extern __u32 ia32_chattr_class[]; extern __u32 ia32_signal_class[]; audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class); audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class); audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class); audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class); #endif audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } __initcall(audit_classes_init);
gpl-2.0
olafdietsche/linux-accessfs
drivers/staging/rtl8192e/rtllib_tx.c
103
27317
/****************************************************************************** Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The full GNU General Public License is included in this distribution in the file called LICENSE. Contact Information: James P. Ketrenos <ipw2100-admin@linux.intel.com> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by Andrea Merello <andrea.merello@gmail.com> A special thanks goes to Realtek for their support ! ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <linux/uaccess.h> #include <linux/if_vlan.h> #include "rtllib.h" /* 802.11 Data Frame 802.11 frame_control for data frames - 2 bytes ,-----------------------------------------------------------------------------------------. 
bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e | |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------| val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x | |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------| desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep | | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | | '-----------------------------------------------------------------------------------------' /\ | 802.11 Data Frame | ,--------- 'ctrl' expands to >-----------' | ,--'---,-------------------------------------------------------------. Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 | |------|------|---------|---------|---------|------|---------|------| Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs | | | tion | (BSSID) | | | ence | data | | `--------------------------------------------------| |------' Total: 28 non-data bytes `----.----' | .- 'Frame data' expands to <---------------------------' | V ,---------------------------------------------------. Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 | |------|------|---------|----------|------|---------| Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | | DSAP | SSAP | | | | Packet | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | `-----------------------------------------| | Total: 8 non-data bytes `----.----' | .- 'IP Packet' expands, if WEP enabled, to <--' | V ,-----------------------. Bytes | 4 | 0-2296 | 4 | |-----|-----------|-----| Desc. | IV | Encrypted | ICV | | | IP Packet | | `-----------------------' Total: 8 non-data bytes 802.3 Ethernet Data Frame ,-----------------------------------------. Bytes | 6 | 6 | 2 | Variable | 4 | |-------|-------|------|-----------|------| Desc. | Dest. 
| Source| Type | IP Packet | fcs | | MAC | MAC | | | | `-----------------------------------------' Total: 18 non-data bytes In the event that fragmentation is required, the incoming payload is split into N parts of size ieee->fts. The first fragment contains the SNAP header and the remaining packets are just data. If encryption is enabled, each fragment payload size is reduced by enough space to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP) So if you have 1500 bytes of payload with ieee->fts set to 500 without encryption it will take 3 frames. With WEP it will take 4 frames as the payload of each frame is reduced to 492 bytes. * SKB visualization * * ,- skb->data * | * | ETHERNET HEADER ,-<-- PAYLOAD * | | 14 bytes from skb->data * | 2 bytes for Type --> ,T. | (sizeof ethhdr) * | | | | * |,-Dest.--. ,--Src.---. | | | * | 6 bytes| | 6 bytes | | | | * v | | | | | | * 0 | v 1 | v | v 2 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 * ^ | ^ | ^ | * | | | | | | * | | | | `T' <---- 2 bytes for Type * | | | | * | | '---SNAP--' <-------- 6 bytes for SNAP * | | * `-IV--' <-------------------- 4 bytes for IV (WEP) * * SNAP HEADER * */ static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; inline int rtllib_put_snap(u8 *data, u16 h_proto) { struct rtllib_snap_hdr *snap; u8 *oui; snap = (struct rtllib_snap_hdr *)data; snap->dsap = 0xaa; snap->ssap = 0xaa; snap->ctrl = 0x03; if (h_proto == 0x8137 || h_proto == 0x80f3) oui = P802_1H_OUI; else oui = RFC1042_OUI; snap->oui[0] = oui[0]; snap->oui[1] = oui[1]; snap->oui[2] = oui[2]; *(u16 *)(data + SNAP_SIZE) = h_proto; return SNAP_SIZE + sizeof(u16); } int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag, int hdr_len) { struct lib80211_crypt_data *crypt = NULL; int res; crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; if (!(crypt && crypt->ops)) { printk(KERN_INFO "=========>%s(), crypt is 
null\n", __func__); return -1; } /* To encrypt, frame format is: * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so * call both MSDU and MPDU encryption functions from here. */ atomic_inc(&crypt->refcnt); res = 0; if (crypt->ops->encrypt_msdu) res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv); if (res == 0 && crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_INFO "%s: Encryption failed: len=%d.\n", ieee->dev->name, frag->len); ieee->ieee_stats.tx_discards++; return -1; } return 0; } void rtllib_txb_free(struct rtllib_txb *txb) { if (unlikely(!txb)) return; kfree(txb); } static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size, gfp_t gfp_mask) { struct rtllib_txb *txb; int i; txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags), gfp_mask); if (!txb) return NULL; memset(txb, 0, sizeof(struct rtllib_txb)); txb->nr_frags = nr_frags; txb->frag_size = cpu_to_le16(txb_size); for (i = 0; i < nr_frags; i++) { txb->fragments[i] = dev_alloc_skb(txb_size); if (unlikely(!txb->fragments[i])) { i--; break; } memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb)); } if (unlikely(i != nr_frags)) { while (i >= 0) dev_kfree_skb_any(txb->fragments[i--]); kfree(txb); return NULL; } return txb; } static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu) { struct ethhdr *eth; struct iphdr *ip; eth = (struct ethhdr *)skb->data; if (eth->h_proto != htons(ETH_P_IP)) return 0; RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len); ip = ip_hdr(skb); switch (ip->tos & 0xfc) { case 0x20: return 2; case 0x40: return 1; case 0x60: return 3; case 0x80: return 4; case 0xa0: return 5; case 0xc0: return 6; case 0xe0: return 7; default: return 0; } } static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee, struct sk_buff *skb, struct cb_desc *tcb_desc) { struct 
rt_hi_throughput *pHTInfo = ieee->pHTInfo; struct tx_ts_record *pTxTs = NULL; struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data; if (rtllib_act_scanning(ieee, false)) return; if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) return; if (!IsQoSDataFrame(skb->data)) return; if (is_multicast_ether_addr(hdr->addr1)) return; if (tcb_desc->bdhcp || ieee->CntAfterLink < 2) return; if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION) return; if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) return; if (pHTInfo->bCurrentAMPDUEnable) { if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true)) { printk(KERN_INFO "%s: can't get TS\n", __func__); return; } if (pTxTs->TxAdmittedBARecord.bValid == false) { if (ieee->wpa_ie_len && (ieee->pairwise_key_type == KEY_TYPE_NA)) { ; } else if (tcb_desc->bdhcp == 1) { ; } else if (!pTxTs->bDisable_AddBa) { TsStartAddBaProcess(ieee, pTxTs); } goto FORCED_AGG_SETTING; } else if (pTxTs->bUsingBa == false) { if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096)) pTxTs->bUsingBa = true; else goto FORCED_AGG_SETTING; } if (ieee->iw_mode == IW_MODE_INFRA) { tcb_desc->bAMPDUEnable = true; tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor; tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity; } } FORCED_AGG_SETTING: switch (pHTInfo->ForcedAMPDUMode) { case HT_AGG_AUTO: break; case HT_AGG_FORCE_ENABLE: tcb_desc->bAMPDUEnable = true; tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity; tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor; break; case HT_AGG_FORCE_DISABLE: tcb_desc->bAMPDUEnable = false; tcb_desc->ampdu_density = 0; tcb_desc->ampdu_factor = 0; break; } return; } static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { tcb_desc->bUseShortPreamble = false; if (tcb_desc->data_rate == 2) return; else if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 
tcb_desc->bUseShortPreamble = true; return; } static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; tcb_desc->bUseShortGI = false; if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) return; if (pHTInfo->bForcedShortGI) { tcb_desc->bUseShortGI = true; return; } if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz) tcb_desc->bUseShortGI = true; else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz) tcb_desc->bUseShortGI = true; } static void rtllib_query_BandwidthMode(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; tcb_desc->bPacketBW = false; if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT) return; if (tcb_desc->bMulticast || tcb_desc->bBroadcast) return; if ((tcb_desc->data_rate & 0x80) == 0) return; if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz && !ieee->bandwidth_auto_switch.bforced_tx20Mhz) tcb_desc->bPacketBW = true; return; } static void rtllib_query_protectionmode(struct rtllib_device *ieee, struct cb_desc *tcb_desc, struct sk_buff *skb) { tcb_desc->bRTSSTBC = false; tcb_desc->bRTSUseShortGI = false; tcb_desc->bCTSEnable = false; tcb_desc->RTSSC = 0; tcb_desc->bRTSBW = false; if (tcb_desc->bBroadcast || tcb_desc->bMulticast) return; if (is_broadcast_ether_addr(skb->data+16)) return; if (ieee->mode < IEEE_N_24G) { if (skb->len > ieee->rts) { tcb_desc->bRTSEnable = true; tcb_desc->rts_rate = MGN_24M; } else if (ieee->current_network.buseprotection) { tcb_desc->bRTSEnable = true; tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; } return; } else { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; while (true) { if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) { tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS | HT_IOT_ACT_PURE_N_MODE)) { tcb_desc->bRTSEnable = 
true; tcb_desc->rts_rate = MGN_24M; break; } if (ieee->current_network.buseprotection) { tcb_desc->bRTSEnable = true; tcb_desc->bCTSEnable = true; tcb_desc->rts_rate = MGN_24M; break; } if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) { u8 HTOpMode = pHTInfo->CurrentOpMode; if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) || (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) { tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; } } if (skb->len > ieee->rts) { tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = true; break; } if (tcb_desc->bAMPDUEnable) { tcb_desc->rts_rate = MGN_24M; tcb_desc->bRTSEnable = false; break; } goto NO_PROTECTION; } } if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) tcb_desc->bUseShortPreamble = true; if (ieee->iw_mode == IW_MODE_MASTER) goto NO_PROTECTION; return; NO_PROTECTION: tcb_desc->bRTSEnable = false; tcb_desc->bCTSEnable = false; tcb_desc->rts_rate = 0; tcb_desc->RTSSC = 0; tcb_desc->bRTSBW = false; } static void rtllib_txrate_selectmode(struct rtllib_device *ieee, struct cb_desc *tcb_desc) { if (ieee->bTxDisableRateFallBack) tcb_desc->bTxDisableRateFallBack = true; if (ieee->bTxUseDriverAssingedRate) tcb_desc->bTxUseDriverAssingedRate = true; if (!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate) { if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) tcb_desc->RATRIndex = 0; } } u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb, u8 *dst) { u16 seqnum = 0; if (is_multicast_ether_addr(dst)) return 0; if (IsQoSDataFrame(skb->data)) { struct tx_ts_record *pTS = NULL; if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst, skb->priority, TX_DIR, true)) return 0; seqnum = pTS->TxCurSeq; pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096; return seqnum; } return 0; } static int wme_downgrade_ac(struct sk_buff *skb) { switch (skb->priority) { case 6: case 7: skb->priority = 5; /* VO -> VI */ return 0; case 4: case 5: skb->priority = 3; /* 
VI -> BE */ return 0; case 0: case 3: skb->priority = 1; /* BE -> BK */ return 0; default: return -1; } } int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev) { struct rtllib_device *ieee = (struct rtllib_device *) netdev_priv_rsl(dev); struct rtllib_txb *txb = NULL; struct rtllib_hdr_3addrqos *frag_hdr; int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; unsigned long flags; struct net_device_stats *stats = &ieee->stats; int ether_type = 0, encrypt; int bytes, fc, qos_ctl = 0, hdr_len; struct sk_buff *skb_frag; struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */ .duration_id = 0, .seq_ctl = 0, .qos_ctl = 0 }; u8 dest[ETH_ALEN], src[ETH_ALEN]; int qos_actived = ieee->current_network.qos_data.active; struct lib80211_crypt_data *crypt = NULL; struct cb_desc *tcb_desc; u8 bIsMulticast = false; u8 IsAmsdu = false; bool bdhcp = false; spin_lock_irqsave(&ieee->lock, flags); /* If there is no driver handler to take the TXB, don't bother * creating it... */ if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) || ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) { printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); goto success; } if (likely(ieee->raw_tx == 0)) { if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) { printk(KERN_WARNING "%s: skb too small (%d).\n", ieee->dev->name, skb->len); goto success; } /* Save source and destination addresses */ memcpy(dest, skb->data, ETH_ALEN); memcpy(src, skb->data+ETH_ALEN, ETH_ALEN); memset(skb->cb, 0, sizeof(skb->cb)); ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto); if (ieee->iw_mode == IW_MODE_MONITOR) { txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate " "TXB\n", ieee->dev->name); goto failed; } txb->encrypted = 0; txb->payload_size = cpu_to_le16(skb->len); memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len); goto 
success; } if (skb->len > 282) { if (ETH_P_IP == ether_type) { const struct iphdr *ip = (struct iphdr *) ((u8 *)skb->data+14); if (IPPROTO_UDP == ip->protocol) { struct udphdr *udp; udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); if (((((u8 *)udp)[1] == 68) && (((u8 *)udp)[3] == 67)) || ((((u8 *)udp)[1] == 67) && (((u8 *)udp)[3] == 68))) { bdhcp = true; ieee->LPSDelayCnt = 200; } } } else if (ETH_P_ARP == ether_type) { printk(KERN_INFO "=================>DHCP " "Protocol start tx ARP pkt!!\n"); bdhcp = true; ieee->LPSDelayCnt = ieee->current_network.tim.tim_count; } } skb->priority = rtllib_classify(skb, IsAmsdu); crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && ieee->host_encrypt && crypt && crypt->ops; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != ETH_P_PAE) { stats->tx_dropped++; goto success; } if (crypt && !encrypt && ether_type == ETH_P_PAE) { struct eapol *eap = (struct eapol *)(skb->data + sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16)); RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n", eap_get_type(eap->type)); } /* Advance the SKB to the start of the payload */ skb_pull(skb, sizeof(struct ethhdr)); /* Determine total amount of storage required for TXB packets */ bytes = skb->len + SNAP_SIZE + sizeof(u16); if (encrypt) fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP; else fc = RTLLIB_FTYPE_DATA; if (qos_actived) fc |= RTLLIB_STYPE_QOS_DATA; else fc |= RTLLIB_STYPE_DATA; if (ieee->iw_mode == IW_MODE_INFRA) { fc |= RTLLIB_FCTL_TODS; /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN); memcpy(&header.addr2, &src, ETH_ALEN); if (IsAmsdu) memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN); else memcpy(&header.addr3, &dest, ETH_ALEN); } else if (ieee->iw_mode == IW_MODE_ADHOC) { /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ memcpy(&header.addr1, dest, ETH_ALEN); 
memcpy(&header.addr2, src, ETH_ALEN); memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN); } bIsMulticast = is_multicast_ether_addr(header.addr1); header.frame_ctl = cpu_to_le16(fc); /* Determine fragmentation size based on destination (multicast * and broadcast are not fragmented) */ if (bIsMulticast) { frag_size = MAX_FRAG_THRESHOLD; qos_ctl |= QOS_CTL_NOTCONTAIN_ACK; } else { frag_size = ieee->fts; qos_ctl = 0; } if (qos_actived) { hdr_len = RTLLIB_3ADDR_LEN + 2; /* in case we are a client verify acm is not set for this ac */ while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) { printk(KERN_INFO "skb->priority = %x\n", skb->priority); if (wme_downgrade_ac(skb)) break; printk(KERN_INFO "converted skb->priority = %x\n", skb->priority); } qos_ctl |= skb->priority; header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID); } else { hdr_len = RTLLIB_3ADDR_LEN; } /* Determine amount of payload per fragment. Regardless of if * this stack is providing the full 802.11 header, one will * eventually be affixed to this fragment -- so we must account * for it when determining the amount of payload space. */ bytes_per_frag = frag_size - hdr_len; if (ieee->config & (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS)) bytes_per_frag -= RTLLIB_FCS_LEN; /* Each fragment may need to have room for encrypting * pre/postfix */ if (encrypt) { bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_mpdu_postfix_len + crypt->ops->extra_msdu_prefix_len + crypt->ops->extra_msdu_postfix_len; } /* Number of fragments is the total bytes_per_frag / * payload_per_fragment */ nr_frags = bytes / bytes_per_frag; bytes_last_frag = bytes % bytes_per_frag; if (bytes_last_frag) nr_frags++; else bytes_last_frag = bytes_per_frag; /* When we allocate the TXB we allocate enough space for the * reserve and full fragment bytes (bytes_per_frag doesn't * include prefix, postfix, header, FCS, etc.) 
*/ txb = rtllib_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); goto failed; } txb->encrypted = encrypt; txb->payload_size = cpu_to_le16(bytes); if (qos_actived) txb->queue_index = UP2AC(skb->priority); else txb->queue_index = WME_AC_BE; for (i = 0; i < nr_frags; i++) { skb_frag = txb->fragments[i]; tcb_desc = (struct cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE); if (qos_actived) { skb_frag->priority = skb->priority; tcb_desc->queue_index = UP2AC(skb->priority); } else { skb_frag->priority = WME_AC_BE; tcb_desc->queue_index = WME_AC_BE; } skb_reserve(skb_frag, ieee->tx_headroom); if (encrypt) { if (ieee->hwsec_active) tcb_desc->bHwSec = 1; else tcb_desc->bHwSec = 0; skb_reserve(skb_frag, crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_msdu_prefix_len); } else { tcb_desc->bHwSec = 0; } frag_hdr = (struct rtllib_hdr_3addrqos *) skb_put(skb_frag, hdr_len); memcpy(frag_hdr, &header, hdr_len); /* If this is not the last fragment, then add the * MOREFRAGS bit to the frame control */ if (i != nr_frags - 1) { frag_hdr->frame_ctl = cpu_to_le16( fc | RTLLIB_FCTL_MOREFRAGS); bytes = bytes_per_frag; } else { /* The last fragment has the remaining length */ bytes = bytes_last_frag; } if ((qos_actived) && (!bIsMulticast)) { frag_hdr->seq_ctl = cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag, header.addr1)); frag_hdr->seq_ctl = cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i); } else { frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i); } /* Put a SNAP header on the first fragment */ if (i == 0) { rtllib_put_snap( skb_put(skb_frag, SNAP_SIZE + sizeof(u16)), ether_type); bytes -= SNAP_SIZE + sizeof(u16); } memcpy(skb_put(skb_frag, bytes), skb->data, bytes); /* Advance the SKB... 
*/ skb_pull(skb, bytes); /* Encryption routine will move the header forward in * order to insert the IV between the header and the * payload */ if (encrypt) rtllib_encrypt_fragment(ieee, skb_frag, hdr_len); if (ieee->config & (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS)) skb_put(skb_frag, 4); } if ((qos_actived) && (!bIsMulticast)) { if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF) ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0; else ieee->seq_ctrl[UP2AC(skb->priority) + 1]++; } else { if (ieee->seq_ctrl[0] == 0xFFF) ieee->seq_ctrl[0] = 0; else ieee->seq_ctrl[0]++; } } else { if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) { printk(KERN_WARNING "%s: skb too small (%d).\n", ieee->dev->name, skb->len); goto success; } txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC); if (!txb) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); goto failed; } txb->encrypted = 0; txb->payload_size = cpu_to_le16(skb->len); memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len); } success: if (txb) { struct cb_desc *tcb_desc = (struct cb_desc *) (txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE); tcb_desc->bTxEnableFwCalcDur = 1; tcb_desc->priority = skb->priority; if (ether_type == ETH_P_PAE) { if (ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom) { tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee); tcb_desc->bTxDisableRateFallBack = false; } else { tcb_desc->data_rate = ieee->basic_rate; tcb_desc->bTxDisableRateFallBack = 1; } tcb_desc->RATRIndex = 7; tcb_desc->bTxUseDriverAssingedRate = 1; } else { if (is_multicast_ether_addr(header.addr1)) tcb_desc->bMulticast = 1; if (is_broadcast_ether_addr(header.addr1)) tcb_desc->bBroadcast = 1; rtllib_txrate_selectmode(ieee, tcb_desc); if (tcb_desc->bMulticast || tcb_desc->bBroadcast) tcb_desc->data_rate = ieee->basic_rate; else tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate); if (bdhcp) { if (ieee->pHTInfo->IOTAction & HT_IOT_ACT_WA_IOT_Broadcom) { 
tcb_desc->data_rate = MgntQuery_TxRateExcludeCCKRates(ieee); tcb_desc->bTxDisableRateFallBack = false; } else { tcb_desc->data_rate = MGN_1M; tcb_desc->bTxDisableRateFallBack = 1; } tcb_desc->RATRIndex = 7; tcb_desc->bTxUseDriverAssingedRate = 1; tcb_desc->bdhcp = 1; } rtllib_qurey_ShortPreambleMode(ieee, tcb_desc); rtllib_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc); rtllib_query_HTCapShortGI(ieee, tcb_desc); rtllib_query_BandwidthMode(ieee, tcb_desc); rtllib_query_protectionmode(ieee, tcb_desc, txb->fragments[0]); } } spin_unlock_irqrestore(&ieee->lock, flags); dev_kfree_skb_any(skb); if (txb) { if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) { dev->stats.tx_packets++; dev->stats.tx_bytes += txb->payload_size; rtllib_softmac_xmit(txb, ieee); } else { if ((*ieee->hard_start_xmit)(txb, dev) == 0) { stats->tx_packets++; stats->tx_bytes += txb->payload_size; return 0; } rtllib_txb_free(txb); } } return 0; failed: spin_unlock_irqrestore(&ieee->lock, flags); netif_stop_queue(dev); stats->tx_errors++; return 1; } int rtllib_xmit(struct sk_buff *skb, struct net_device *dev) { memset(skb->cb, 0, sizeof(skb->cb)); return rtllib_xmit_inter(skb, dev); } EXPORT_SYMBOL(rtllib_xmit);
gpl-2.0
smihir/wireless-testing
arch/arm/mach-omap2/pm24xx.c
359
8295
/* * OMAP2 Power Management Routines * * Copyright (C) 2005 Texas Instruments, Inc. * Copyright (C) 2006-2008 Nokia Corporation * * Written by: * Richard Woodruff <r-woodruff2@ti.com> * Tony Lindgren * Juha Yrjola * Amit Kucheria <amit.kucheria@nokia.com> * Igor Stoppa <igor.stoppa@nokia.com> * * Based on pm.c for omap1 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/suspend.h> #include <linux/sched.h> #include <linux/proc_fs.h> #include <linux/interrupt.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/clk-provider.h> #include <linux/irq.h> #include <linux/time.h> #include <linux/gpio.h> #include <linux/platform_data/gpio-omap.h> #include <asm/fncpy.h> #include <asm/mach/time.h> #include <asm/mach/irq.h> #include <asm/mach-types.h> #include <asm/system_misc.h> #include <linux/omap-dma.h> #include "soc.h" #include "common.h" #include "clock.h" #include "prm2xxx.h" #include "prm-regbits-24xx.h" #include "cm2xxx.h" #include "cm-regbits-24xx.h" #include "sdrc.h" #include "sram.h" #include "pm.h" #include "control.h" #include "powerdomain.h" #include "clockdomain.h" static void (*omap2_sram_suspend)(u32 dllctrl, void __iomem *sdrc_dlla_ctrl, void __iomem *sdrc_power); static struct powerdomain *mpu_pwrdm, *core_pwrdm; static struct clockdomain *dsp_clkdm, *mpu_clkdm, *wkup_clkdm, *gfx_clkdm; static struct clk *osc_ck, *emul_ck; static int omap2_enter_full_retention(void) { u32 l; /* There is 1 reference hold for all children of the oscillator * clock, the following will remove it. If no one else uses the * oscillator itself it will be disabled if/when we enter retention * mode. */ clk_disable(osc_ck); /* Clear old wake-up events */ /* REVISIT: These write to reserved bits? 
*/ omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1); omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2); omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST); pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET); pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET); /* Workaround to kill USB */ l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL; omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0); omap2_gpio_prepare_for_idle(0); /* One last check for pending IRQs to avoid extra latency due * to sleeping unnecessarily. */ if (omap_irq_pending()) goto no_sleep; /* Jump to SRAM suspend code */ omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL), OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL), OMAP_SDRC_REGADDR(SDRC_POWER)); no_sleep: omap2_gpio_resume_after_idle(); clk_enable(osc_ck); /* clear CORE wake-up events */ omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1); omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2); /* wakeup domain events - bit 1: GPT1, bit5 GPIO */ omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST); /* MPU domain wake events */ l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET); if (l & 0x01) omap2_prm_write_mod_reg(0x01, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET); if (l & 0x20) omap2_prm_write_mod_reg(0x20, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET); /* Mask future PRCM-to-MPU interrupts */ omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET); pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_ON); return 0; } static int sti_console_enabled; static int omap2_allow_mpu_retention(void) { if (!omap2xxx_cm_mpu_retention_allowed()) return 0; if (sti_console_enabled) return 0; return 1; } static void omap2_enter_mpu_retention(void) { const int zero = 0; /* The peripherals seem not to be able to wake up the MPU when * it is in retention mode. */ if (omap2_allow_mpu_retention()) { /* REVISIT: These write to reserved bits? 
*/ omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1); omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2); omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST); /* Try to enter MPU retention */ pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET); } else { /* Block MPU retention */ pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); } /* WFI */ asm("mcr p15, 0, %0, c7, c0, 4" : : "r" (zero) : "memory", "cc"); pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON); } static int omap2_can_sleep(void) { if (omap2xxx_cm_fclks_active()) return 0; if (__clk_is_enabled(osc_ck)) return 0; if (omap_dma_running()) return 0; return 1; } static void omap2_pm_idle(void) { if (!omap2_can_sleep()) { if (omap_irq_pending()) return; omap2_enter_mpu_retention(); return; } if (omap_irq_pending()) return; omap2_enter_full_retention(); } static void __init prcm_setup_regs(void) { int i, num_mem_banks; struct powerdomain *pwrdm; /* * Enable autoidle * XXX This should be handled by hwmod code or PRCM init code */ omap2_prm_write_mod_reg(OMAP24XX_AUTOIDLE_MASK, OCP_MOD, OMAP2_PRCM_SYSCONFIG_OFFSET); /* * Set CORE powerdomain memory banks to retain their contents * during RETENTION */ num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm); for (i = 0; i < num_mem_banks; i++) pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET); pwrdm_set_logic_retst(core_pwrdm, PWRDM_POWER_RET); pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET); /* Force-power down DSP, GFX powerdomains */ pwrdm = clkdm_get_pwrdm(dsp_clkdm); pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF); pwrdm = clkdm_get_pwrdm(gfx_clkdm); pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF); /* Enable hardware-supervised idle for all clkdms */ clkdm_for_each(omap_pm_clkdms_setup, NULL); clkdm_add_wkdep(mpu_clkdm, wkup_clkdm); #ifdef CONFIG_SUSPEND omap_pm_suspend = omap2_enter_full_retention; #endif /* REVISIT: Configure number of 32 kHz clock cycles for sys_clk * stabilisation */ omap2_prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, 
OMAP24XX_GR_MOD, OMAP2_PRCM_CLKSSETUP_OFFSET); /* Configure automatic voltage transition */ omap2_prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTSETUP_OFFSET); omap2_prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT_MASK | (0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) | OMAP24XX_MEMRETCTRL_MASK | (0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) | (0x0 << OMAP24XX_VOLT_LEVEL_SHIFT), OMAP24XX_GR_MOD, OMAP2_PRCM_VOLTCTRL_OFFSET); /* Enable wake-up events */ omap2_prm_write_mod_reg(OMAP24XX_EN_GPIOS_MASK | OMAP24XX_EN_GPT1_MASK, WKUP_MOD, PM_WKEN); } int __init omap2_pm_init(void) { u32 l; printk(KERN_INFO "Power Management for OMAP2 initializing\n"); l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET); printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f); /* Look up important powerdomains */ mpu_pwrdm = pwrdm_lookup("mpu_pwrdm"); if (!mpu_pwrdm) pr_err("PM: mpu_pwrdm not found\n"); core_pwrdm = pwrdm_lookup("core_pwrdm"); if (!core_pwrdm) pr_err("PM: core_pwrdm not found\n"); /* Look up important clockdomains */ mpu_clkdm = clkdm_lookup("mpu_clkdm"); if (!mpu_clkdm) pr_err("PM: mpu_clkdm not found\n"); wkup_clkdm = clkdm_lookup("wkup_clkdm"); if (!wkup_clkdm) pr_err("PM: wkup_clkdm not found\n"); dsp_clkdm = clkdm_lookup("dsp_clkdm"); if (!dsp_clkdm) pr_err("PM: dsp_clkdm not found\n"); gfx_clkdm = clkdm_lookup("gfx_clkdm"); if (!gfx_clkdm) pr_err("PM: gfx_clkdm not found\n"); osc_ck = clk_get(NULL, "osc_ck"); if (IS_ERR(osc_ck)) { printk(KERN_ERR "could not get osc_ck\n"); return -ENODEV; } if (cpu_is_omap242x()) { emul_ck = clk_get(NULL, "emul_ck"); if (IS_ERR(emul_ck)) { printk(KERN_ERR "could not get emul_ck\n"); clk_put(osc_ck); return -ENODEV; } } prcm_setup_regs(); /* * We copy the assembler sleep/wakeup routines to SRAM. * These routines need to be in SRAM as that's the only * memory the MPU can see when it wakes up after the entire * chip enters idle. 
*/ omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend, omap24xx_cpu_suspend_sz); arm_pm_idle = omap2_pm_idle; return 0; }
gpl-2.0
brycecr/linux
drivers/net/wireless/iwlwifi/iwl-5000.c
615
5862
/****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" #include "iwl-agn-hw.h" #include "iwl-csr.h" /* Highest firmware API version supported */ #define IWL5000_UCODE_API_MAX 5 #define IWL5150_UCODE_API_MAX 2 /* Oldest version we won't warn about */ #define IWL5000_UCODE_API_OK 5 #define IWL5150_UCODE_API_OK 2 /* Lowest firmware API version supported */ #define IWL5000_UCODE_API_MIN 1 #define IWL5150_UCODE_API_MIN 1 /* EEPROM versions */ #define EEPROM_5000_TX_POWER_VERSION (4) #define EEPROM_5000_EEPROM_VERSION (0x11A) #define EEPROM_5050_TX_POWER_VERSION (4) #define EEPROM_5050_EEPROM_VERSION (0x21E) #define IWL5000_FW_PRE "iwlwifi-5000-" #define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode" #define IWL5150_FW_PRE "iwlwifi-5150-" #define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode" static const 
struct iwl_base_params iwl5000_base_params = { .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, .led_compensation = 51, .wd_timeout = IWL_WATCHDOG_DISABLED, .max_event_log_size = 512, .scd_chain_ext_wa = true, }; static const struct iwl_ht_params iwl5000_ht_params = { .ht_greenfield_support = true, .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; static const struct iwl_eeprom_params iwl5000_eeprom_params = { .regulatory_bands = { EEPROM_REG_BAND_1_CHANNELS, EEPROM_REG_BAND_2_CHANNELS, EEPROM_REG_BAND_3_CHANNELS, EEPROM_REG_BAND_4_CHANNELS, EEPROM_REG_BAND_5_CHANNELS, EEPROM_REG_BAND_24_HT40_CHANNELS, EEPROM_REG_BAND_52_HT40_CHANNELS }, }; #define IWL_DEVICE_5000 \ .fw_name_pre = IWL5000_FW_PRE, \ .ucode_api_max = IWL5000_UCODE_API_MAX, \ .ucode_api_ok = IWL5000_UCODE_API_OK, \ .ucode_api_min = IWL5000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_5000, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_5000_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK const struct iwl_cfg iwl5300_agn_cfg = { .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", IWL_DEVICE_5000, /* at least EEPROM 0x11A has wrong info */ .valid_tx_ant = ANT_ABC, /* .cfg overwrite */ .valid_rx_ant = ANT_ABC, /* .cfg overwrite */ .ht_params = &iwl5000_ht_params, }; const struct iwl_cfg iwl5100_bgn_cfg = { .name = "Intel(R) WiFi Link 5100 BGN", IWL_DEVICE_5000, .valid_tx_ant = ANT_B, /* .cfg overwrite */ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ .ht_params = &iwl5000_ht_params, }; const struct iwl_cfg iwl5100_abg_cfg = { .name = "Intel(R) WiFi Link 5100 ABG", IWL_DEVICE_5000, .valid_tx_ant = ANT_B, /* .cfg overwrite */ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ }; const struct iwl_cfg iwl5100_agn_cfg = { .name = "Intel(R) WiFi Link 
5100 AGN", IWL_DEVICE_5000, .valid_tx_ant = ANT_B, /* .cfg overwrite */ .valid_rx_ant = ANT_AB, /* .cfg overwrite */ .ht_params = &iwl5000_ht_params, }; const struct iwl_cfg iwl5350_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", .fw_name_pre = IWL5000_FW_PRE, .ucode_api_max = IWL5000_UCODE_API_MAX, .ucode_api_ok = IWL5000_UCODE_API_OK, .ucode_api_min = IWL5000_UCODE_API_MIN, .device_family = IWL_DEVICE_FAMILY_5000, .max_inst_size = IWLAGN_RTC_INST_SIZE, .max_data_size = IWLAGN_RTC_DATA_SIZE, .nvm_ver = EEPROM_5050_EEPROM_VERSION, .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, .base_params = &iwl5000_base_params, .eeprom_params = &iwl5000_eeprom_params, .ht_params = &iwl5000_ht_params, .led_mode = IWL_LED_BLINK, .internal_wimax_coex = true, }; #define IWL_DEVICE_5150 \ .fw_name_pre = IWL5150_FW_PRE, \ .ucode_api_max = IWL5150_UCODE_API_MAX, \ .ucode_api_ok = IWL5150_UCODE_API_OK, \ .ucode_api_min = IWL5150_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_5150, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ .nvm_ver = EEPROM_5050_EEPROM_VERSION, \ .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true const struct iwl_cfg iwl5150_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", IWL_DEVICE_5150, .ht_params = &iwl5000_ht_params, }; const struct iwl_cfg iwl5150_abg_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5150 ABG", IWL_DEVICE_5150, }; MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK)); MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
gpl-2.0
manuelnaranjo/goldenleaf
arch/powerpc/kernel/of_device.c
615
3261
#include <linux/string.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> #include <linux/of_device.h> #include <asm/errno.h> #include <asm/dcr.h> static void of_device_make_bus_id(struct of_device *dev) { static atomic_t bus_no_reg_magic; struct device_node *node = dev->node; const u32 *reg; u64 addr; int magic; /* * If it's a DCR based device, use 'd' for native DCRs * and 'D' for MMIO DCRs. */ #ifdef CONFIG_PPC_DCR reg = of_get_property(node, "dcr-reg", NULL); if (reg) { #ifdef CONFIG_PPC_DCR_NATIVE dev_set_name(&dev->dev, "d%x.%s", *reg, node->name); #else /* CONFIG_PPC_DCR_NATIVE */ addr = of_translate_dcr_address(node, *reg, NULL); if (addr != OF_BAD_ADDR) { dev_set_name(&dev->dev, "D%llx.%s", (unsigned long long)addr, node->name); return; } #endif /* !CONFIG_PPC_DCR_NATIVE */ } #endif /* CONFIG_PPC_DCR */ /* * For MMIO, get the physical address */ reg = of_get_property(node, "reg", NULL); if (reg) { addr = of_translate_address(node, reg); if (addr != OF_BAD_ADDR) { dev_set_name(&dev->dev, "%llx.%s", (unsigned long long)addr, node->name); return; } } /* * No BusID, use the node name and add a globally incremented * counter (and pray...) 
*/ magic = atomic_add_return(1, &bus_no_reg_magic); dev_set_name(&dev->dev, "%s.%d", node->name, magic - 1); } struct of_device *of_device_alloc(struct device_node *np, const char *bus_id, struct device *parent) { struct of_device *dev; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; dev->node = of_node_get(np); dev->dev.dma_mask = &dev->dma_mask; dev->dev.parent = parent; dev->dev.release = of_release_dev; dev->dev.archdata.of_node = np; if (bus_id) dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(dev); return dev; } EXPORT_SYMBOL(of_device_alloc); int of_device_uevent(struct device *dev, struct kobj_uevent_env *env) { struct of_device *ofdev; const char *compat; int seen = 0, cplen, sl; if (!dev) return -ENODEV; ofdev = to_of_device(dev); if (add_uevent_var(env, "OF_NAME=%s", ofdev->node->name)) return -ENOMEM; if (add_uevent_var(env, "OF_TYPE=%s", ofdev->node->type)) return -ENOMEM; /* Since the compatible field can contain pretty much anything * it's not really legal to split it out with commas. We split it * up using a number of environment variables instead. */ compat = of_get_property(ofdev->node, "compatible", &cplen); while (compat && *compat && cplen > 0) { if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat)) return -ENOMEM; sl = strlen (compat) + 1; compat += sl; cplen -= sl; seen++; } if (add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen)) return -ENOMEM; /* modalias is trickier, we add it in 2 steps */ if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; sl = of_device_get_modalias(ofdev, &env->buf[env->buflen-1], sizeof(env->buf) - env->buflen); if (sl >= (sizeof(env->buf) - env->buflen)) return -ENOMEM; env->buflen += sl; return 0; } EXPORT_SYMBOL(of_device_uevent); EXPORT_SYMBOL(of_device_get_modalias);
gpl-2.0
CL0SeY/kernel_P7320T_ICS
net/netfilter/xt_hashlimit.c
2151
22473
/* * xt_hashlimit - Netfilter module to limit the number of packets per time * separately for each hashbucket (sourceip/sourceport/dstip/dstport) * * (C) 2003-2004 by Harald Welte <laforge@netfilter.org> * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * * Development of this code was funded by Astaro AG, http://www.astaro.com/ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/spinlock.h> #include <linux/random.h> #include <linux/jhash.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/mm.h> #include <linux/in.h> #include <linux/ip.h> #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) #include <linux/ipv6.h> #include <net/ipv6.h> #endif #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter/xt_hashlimit.h> #include <linux/mutex.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match"); MODULE_ALIAS("ipt_hashlimit"); MODULE_ALIAS("ip6t_hashlimit"); struct hashlimit_net { struct hlist_head htables; struct proc_dir_entry *ipt_hashlimit; struct proc_dir_entry *ip6t_hashlimit; }; static int hashlimit_net_id; static inline struct hashlimit_net *hashlimit_pernet(struct net *net) { return net_generic(net, hashlimit_net_id); } /* need to declare this at the top */ static const struct file_operations dl_file_ops; /* hash table crap */ struct dsthash_dst { union { struct { __be32 src; __be32 dst; } ip; #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) struct { __be32 src[4]; __be32 dst[4]; } ip6; #endif }; __be16 src_port; __be16 dst_port; }; struct 
dsthash_ent { /* static / read-only parts in the beginning */ struct hlist_node node; struct dsthash_dst dst; /* modified structure members in the end */ spinlock_t lock; unsigned long expires; /* precalculated expiry time */ struct { unsigned long prev; /* last modification */ u_int32_t credit; u_int32_t credit_cap, cost; } rateinfo; struct rcu_head rcu; }; struct xt_hashlimit_htable { struct hlist_node node; /* global list of all htables */ int use; u_int8_t family; bool rnd_initialized; struct hashlimit_cfg1 cfg; /* config */ /* used internally */ spinlock_t lock; /* lock for list_head */ u_int32_t rnd; /* random seed for hash */ unsigned int count; /* number entries in table */ struct timer_list timer; /* timer for gc */ /* seq_file stuff */ struct proc_dir_entry *pde; struct net *net; struct hlist_head hash[0]; /* hashtable itself */ }; static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */ static struct kmem_cache *hashlimit_cachep __read_mostly; static inline bool dst_cmp(const struct dsthash_ent *ent, const struct dsthash_dst *b) { return !memcmp(&ent->dst, b, sizeof(ent->dst)); } static u_int32_t hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) { u_int32_t hash = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), ht->rnd); /* * Instead of returning hash % ht->cfg.size (implying a divide) * we return the high 32 bits of the (hash * ht->cfg.size) that will * give results between [0 and cfg.size-1] and same hash distribution, * but using a multiply, less expensive than a divide */ return ((u64)hash * ht->cfg.size) >> 32; } static struct dsthash_ent * dsthash_find(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) { struct dsthash_ent *ent; struct hlist_node *pos; u_int32_t hash = hash_dst(ht, dst); if (!hlist_empty(&ht->hash[hash])) { hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node) if (dst_cmp(ent, dst)) { spin_lock(&ent->lock); return ent; } } return NULL; } /* allocate dsthash_ent, 
initialize dst, put in htable and lock it */ static struct dsthash_ent * dsthash_alloc_init(struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) { struct dsthash_ent *ent; spin_lock(&ht->lock); /* initialize hash with random val at the time we allocate * the first hashtable entry */ if (unlikely(!ht->rnd_initialized)) { get_random_bytes(&ht->rnd, sizeof(ht->rnd)); ht->rnd_initialized = true; } if (ht->cfg.max && ht->count >= ht->cfg.max) { /* FIXME: do something. question is what.. */ if (net_ratelimit()) pr_err("max count of %u reached\n", ht->cfg.max); ent = NULL; } else ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); if (!ent) { if (net_ratelimit()) pr_err("cannot allocate dsthash_ent\n"); } else { memcpy(&ent->dst, dst, sizeof(ent->dst)); spin_lock_init(&ent->lock); spin_lock(&ent->lock); hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]); ht->count++; } spin_unlock(&ht->lock); return ent; } static void dsthash_free_rcu(struct rcu_head *head) { struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu); kmem_cache_free(hashlimit_cachep, ent); } static inline void dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent) { hlist_del_rcu(&ent->node); call_rcu_bh(&ent->rcu, dsthash_free_rcu); ht->count--; } static void htable_gc(unsigned long htlong); static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); struct xt_hashlimit_htable *hinfo; unsigned int size; unsigned int i; if (minfo->cfg.size) { size = minfo->cfg.size; } else { size = (totalram_pages << PAGE_SHIFT) / 16384 / sizeof(struct list_head); if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE) size = 8192; if (size < 16) size = 16; } /* FIXME: don't use vmalloc() here or anywhere else -HW */ hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) + sizeof(struct list_head) * size); if (hinfo == NULL) return -ENOMEM; minfo->hinfo = hinfo; /* copy match config 
into hashtable config */ memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg)); hinfo->cfg.size = size; if (hinfo->cfg.max == 0) hinfo->cfg.max = 8 * hinfo->cfg.size; else if (hinfo->cfg.max < hinfo->cfg.size) hinfo->cfg.max = hinfo->cfg.size; for (i = 0; i < hinfo->cfg.size; i++) INIT_HLIST_HEAD(&hinfo->hash[i]); hinfo->use = 1; hinfo->count = 0; hinfo->family = family; hinfo->rnd_initialized = false; spin_lock_init(&hinfo->lock); hinfo->pde = proc_create_data(minfo->name, 0, (family == NFPROTO_IPV4) ? hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit, &dl_file_ops, hinfo); if (hinfo->pde == NULL) { vfree(hinfo); return -ENOMEM; } hinfo->net = net; setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo); hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval); add_timer(&hinfo->timer); hlist_add_head(&hinfo->node, &hashlimit_net->htables); return 0; } static bool select_all(const struct xt_hashlimit_htable *ht, const struct dsthash_ent *he) { return 1; } static bool select_gc(const struct xt_hashlimit_htable *ht, const struct dsthash_ent *he) { return time_after_eq(jiffies, he->expires); } static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool (*select)(const struct xt_hashlimit_htable *ht, const struct dsthash_ent *he)) { unsigned int i; /* lock hash table and iterate over it */ spin_lock_bh(&ht->lock); for (i = 0; i < ht->cfg.size; i++) { struct dsthash_ent *dh; struct hlist_node *pos, *n; hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) { if ((*select)(ht, dh)) dsthash_free(ht, dh); } } spin_unlock_bh(&ht->lock); } /* hash table garbage collector, run by timer */ static void htable_gc(unsigned long htlong) { struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong; htable_selective_cleanup(ht, select_gc); /* re-add the timer accordingly */ ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval); add_timer(&ht->timer); } static void htable_destroy(struct xt_hashlimit_htable 
*hinfo) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net); struct proc_dir_entry *parent; del_timer_sync(&hinfo->timer); if (hinfo->family == NFPROTO_IPV4) parent = hashlimit_net->ipt_hashlimit; else parent = hashlimit_net->ip6t_hashlimit; remove_proc_entry(hinfo->pde->name, parent); htable_selective_cleanup(hinfo, select_all); vfree(hinfo); } static struct xt_hashlimit_htable *htable_find_get(struct net *net, const char *name, u_int8_t family) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); struct xt_hashlimit_htable *hinfo; struct hlist_node *pos; hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) { if (!strcmp(name, hinfo->pde->name) && hinfo->family == family) { hinfo->use++; return hinfo; } } return NULL; } static void htable_put(struct xt_hashlimit_htable *hinfo) { mutex_lock(&hashlimit_mutex); if (--hinfo->use == 0) { hlist_del(&hinfo->node); htable_destroy(hinfo); } mutex_unlock(&hashlimit_mutex); } /* The algorithm used is the Simple Token Bucket Filter (TBF) * see net/sched/sch_tbf.c in the linux source tree */ /* Rusty: This is my (non-mathematically-inclined) understanding of this algorithm. The `average rate' in jiffies becomes your initial amount of credit `credit' and the most credit you can ever have `credit_cap'. The `peak rate' becomes the cost of passing the test, `cost'. `prev' tracks the last packet hit: you gain one credit per jiffy. If you get credit balance more than this, the extra credit is discarded. Every time the match passes, you lose `cost' credits; if you don't have that many, the test fails. See Alexey's formal explanation in net/sched/sch_tbf.c. To get the maximum range, we multiply by this factor (ie. you get N credits per jiffy). We want to allow a rate as low as 1 per day (slowest userspace tool allows), which means CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie. 
*/ #define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24)) /* Repeated shift and or gives us all 1s, final shift and add 1 gives * us the power of 2 below the theoretical max, so GCC simply does a * shift. */ #define _POW2_BELOW2(x) ((x)|((x)>>1)) #define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2)) #define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4)) #define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8)) #define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16)) #define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1) #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) /* Precision saver. */ static inline u_int32_t user2credits(u_int32_t user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) /* Divide first. */ return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY; return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; } static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) { dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY; if (dh->rateinfo.credit > dh->rateinfo.credit_cap) dh->rateinfo.credit = dh->rateinfo.credit_cap; dh->rateinfo.prev = now; } static inline __be32 maskl(__be32 a, unsigned int l) { return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0; } #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) static void hashlimit_ipv6_mask(__be32 *i, unsigned int p) { switch (p) { case 0 ... 31: i[0] = maskl(i[0], p); i[1] = i[2] = i[3] = 0; break; case 32 ... 63: i[1] = maskl(i[1], p - 32); i[2] = i[3] = 0; break; case 64 ... 95: i[2] = maskl(i[2], p - 64); i[3] = 0; break; case 96 ... 
127: i[3] = maskl(i[3], p - 96); break; case 128: break; } } #endif static int hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst, const struct sk_buff *skb, unsigned int protoff) { __be16 _ports[2], *ports; u8 nexthdr; int poff; memset(dst, 0, sizeof(*dst)); switch (hinfo->family) { case NFPROTO_IPV4: if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) dst->ip.dst = maskl(ip_hdr(skb)->daddr, hinfo->cfg.dstmask); if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) dst->ip.src = maskl(ip_hdr(skb)->saddr, hinfo->cfg.srcmask); if (!(hinfo->cfg.mode & (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) return 0; nexthdr = ip_hdr(skb)->protocol; break; #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) case NFPROTO_IPV6: if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) { memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr, sizeof(dst->ip6.dst)); hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask); } if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) { memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr, sizeof(dst->ip6.src)); hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask); } if (!(hinfo->cfg.mode & (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT))) return 0; nexthdr = ipv6_hdr(skb)->nexthdr; protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr); if ((int)protoff < 0) return -1; break; #endif default: BUG(); return 0; } poff = proto_ports_offset(nexthdr); if (poff >= 0) { ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports), &_ports); } else { _ports[0] = _ports[1] = 0; ports = _ports; } if (!ports) return -1; if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT) dst->src_port = ports[0]; if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT) dst->dst_port = ports[1]; return 0; } static bool hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_hashlimit_mtinfo1 *info = par->matchinfo; struct xt_hashlimit_htable *hinfo = info->hinfo; unsigned long now = jiffies; struct dsthash_ent *dh; struct dsthash_dst dst; 
if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) goto hotdrop; rcu_read_lock_bh(); dh = dsthash_find(hinfo, &dst); if (dh == NULL) { dh = dsthash_alloc_init(hinfo, &dst); if (dh == NULL) { rcu_read_unlock_bh(); goto hotdrop; } dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire); dh->rateinfo.prev = jiffies; dh->rateinfo.credit = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); dh->rateinfo.cost = user2credits(hinfo->cfg.avg); } else { /* update expiration timeout */ dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire); rateinfo_recalc(dh, now); } if (dh->rateinfo.credit >= dh->rateinfo.cost) { /* below the limit */ dh->rateinfo.credit -= dh->rateinfo.cost; spin_unlock(&dh->lock); rcu_read_unlock_bh(); return !(info->cfg.mode & XT_HASHLIMIT_INVERT); } spin_unlock(&dh->lock); rcu_read_unlock_bh(); /* default match is underlimit - so over the limit, we need to invert */ return info->cfg.mode & XT_HASHLIMIT_INVERT; hotdrop: par->hotdrop = true; return false; } static int hashlimit_mt_check(const struct xt_mtchk_param *par) { struct net *net = par->net; struct xt_hashlimit_mtinfo1 *info = par->matchinfo; int ret; /* Check for overflow. 
*/ if (info->cfg.burst == 0 || user2credits(info->cfg.avg * info->cfg.burst) < user2credits(info->cfg.avg)) { pr_info("overflow, try lower: %u/%u\n", info->cfg.avg, info->cfg.burst); return -ERANGE; } if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) return -EINVAL; if (info->name[sizeof(info->name)-1] != '\0') return -EINVAL; if (par->family == NFPROTO_IPV4) { if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32) return -EINVAL; } else { if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128) return -EINVAL; } mutex_lock(&hashlimit_mutex); info->hinfo = htable_find_get(net, info->name, par->family); if (info->hinfo == NULL) { ret = htable_create(net, info, par->family); if (ret < 0) { mutex_unlock(&hashlimit_mutex); return ret; } } mutex_unlock(&hashlimit_mutex); return 0; } static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_hashlimit_mtinfo1 *info = par->matchinfo; htable_put(info->hinfo); } static struct xt_match hashlimit_mt_reg[] __read_mostly = { { .name = "hashlimit", .revision = 1, .family = NFPROTO_IPV4, .match = hashlimit_mt, .matchsize = sizeof(struct xt_hashlimit_mtinfo1), .checkentry = hashlimit_mt_check, .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, }, #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) { .name = "hashlimit", .revision = 1, .family = NFPROTO_IPV6, .match = hashlimit_mt, .matchsize = sizeof(struct xt_hashlimit_mtinfo1), .checkentry = hashlimit_mt_check, .destroy = hashlimit_mt_destroy, .me = THIS_MODULE, }, #endif }; /* PROC stuff */ static void *dl_seq_start(struct seq_file *s, loff_t *pos) __acquires(htable->lock) { struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket; spin_lock_bh(&htable->lock); if (*pos >= htable->cfg.size) return NULL; bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC); if (!bucket) return ERR_PTR(-ENOMEM); *bucket = *pos; return bucket; } static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct 
xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; *pos = ++(*bucket); if (*pos >= htable->cfg.size) { kfree(v); return NULL; } return bucket; } static void dl_seq_stop(struct seq_file *s, void *v) __releases(htable->lock) { struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; if (!IS_ERR(bucket)) kfree(bucket); spin_unlock_bh(&htable->lock); } static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, struct seq_file *s) { int res; spin_lock(&ent->lock); /* recalculate to show accurate numbers */ rateinfo_recalc(ent, jiffies); switch (family) { case NFPROTO_IPV4: res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n", (long)(ent->expires - jiffies)/HZ, &ent->dst.ip.src, ntohs(ent->dst.src_port), &ent->dst.ip.dst, ntohs(ent->dst.dst_port), ent->rateinfo.credit, ent->rateinfo.credit_cap, ent->rateinfo.cost); break; #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) case NFPROTO_IPV6: res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n", (long)(ent->expires - jiffies)/HZ, &ent->dst.ip6.src, ntohs(ent->dst.src_port), &ent->dst.ip6.dst, ntohs(ent->dst.dst_port), ent->rateinfo.credit, ent->rateinfo.credit_cap, ent->rateinfo.cost); break; #endif default: BUG(); res = 0; } spin_unlock(&ent->lock); return res; } static int dl_seq_show(struct seq_file *s, void *v) { struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; struct dsthash_ent *ent; struct hlist_node *pos; if (!hlist_empty(&htable->hash[*bucket])) { hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node) if (dl_seq_real_show(ent, htable->family, s)) return -1; } return 0; } static const struct seq_operations dl_seq_ops = { .start = dl_seq_start, .next = dl_seq_next, .stop = dl_seq_stop, .show = dl_seq_show }; static int dl_proc_open(struct inode *inode, struct file *file) { int ret = seq_open(file, &dl_seq_ops); if (!ret) { struct seq_file *sf = file->private_data; 
sf->private = PDE(inode)->data; } return ret; } static const struct file_operations dl_file_ops = { .owner = THIS_MODULE, .open = dl_proc_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release }; static int __net_init hashlimit_proc_net_init(struct net *net) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net); if (!hashlimit_net->ipt_hashlimit) return -ENOMEM; #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net); if (!hashlimit_net->ip6t_hashlimit) { proc_net_remove(net, "ipt_hashlimit"); return -ENOMEM; } #endif return 0; } static void __net_exit hashlimit_proc_net_exit(struct net *net) { proc_net_remove(net, "ipt_hashlimit"); #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) proc_net_remove(net, "ip6t_hashlimit"); #endif } static int __net_init hashlimit_net_init(struct net *net) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); INIT_HLIST_HEAD(&hashlimit_net->htables); return hashlimit_proc_net_init(net); } static void __net_exit hashlimit_net_exit(struct net *net) { struct hashlimit_net *hashlimit_net = hashlimit_pernet(net); BUG_ON(!hlist_empty(&hashlimit_net->htables)); hashlimit_proc_net_exit(net); } static struct pernet_operations hashlimit_net_ops = { .init = hashlimit_net_init, .exit = hashlimit_net_exit, .id = &hashlimit_net_id, .size = sizeof(struct hashlimit_net), }; static int __init hashlimit_mt_init(void) { int err; err = register_pernet_subsys(&hashlimit_net_ops); if (err < 0) return err; err = xt_register_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); if (err < 0) goto err1; err = -ENOMEM; hashlimit_cachep = kmem_cache_create("xt_hashlimit", sizeof(struct dsthash_ent), 0, 0, NULL); if (!hashlimit_cachep) { pr_warning("unable to create slab cache\n"); goto err2; } return 0; err2: 
xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); err1: unregister_pernet_subsys(&hashlimit_net_ops); return err; } static void __exit hashlimit_mt_exit(void) { xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg)); unregister_pernet_subsys(&hashlimit_net_ops); rcu_barrier_bh(); kmem_cache_destroy(hashlimit_cachep); } module_init(hashlimit_mt_init); module_exit(hashlimit_mt_exit);
gpl-2.0
omnirom/android_kernel_moto_shamu
drivers/usb/host/u132-hcd.c
2151
92897
/* * Host Controller Driver for the Elan Digital Systems U132 adapter * * Copyright(C) 2006 Elan Digital Systems Limited * http://www.elandigitalsystems.com * * Author and Maintainer - Tony Olech - Elan Digital Systems * tony.olech@elandigitalsystems.com * * This program is free software;you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * * * This driver was written by Tony Olech(tony.olech@elandigitalsystems.com) * based on various USB host drivers in the 2.6.15 linux kernel * with constant reference to the 3rd Edition of Linux Device Drivers * published by O'Reilly * * The U132 adapter is a USB to CardBus adapter specifically designed * for PC cards that contain an OHCI host controller. Typical PC cards * are the Orange Mobile 3G Option GlobeTrotter Fusion card. * * The U132 adapter will *NOT *work with PC cards that do not contain * an OHCI controller. A simple way to test whether a PC card has an * OHCI controller as an interface is to insert the PC card directly * into a laptop(or desktop) with a CardBus slot and if "lspci" shows * a new USB controller and "lsusb -v" shows a new OHCI Host Controller * then there is a good chance that the U132 adapter will support the * PC card.(you also need the specific client driver for the PC card) * * Please inform the Author and Maintainer about any PC cards that * contain OHCI Host Controller and work when directly connected to * an embedded CardBus slot but do not work when they are connected * via an ELAN U132 adapter. 
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/pci_ids.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> /* FIXME ohci.h is ONLY for internal use by the OHCI driver. * If you're going to try stuff like this, you need to split * out shareable stuff (register declarations?) into its own * file, maybe name <linux/usb/ohci.h> */ #include "ohci.h" #define OHCI_CONTROL_INIT OHCI_CTRL_CBSR #define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \ OHCI_INTR_WDH) MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited"); MODULE_DESCRIPTION("U132 USB Host Controller Driver"); MODULE_LICENSE("GPL"); #define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444) INT_MODULE_PARM(testing, 0); /* Some boards misreport power switching/overcurrent*/ static bool distrust_firmware = 1; module_param(distrust_firmware, bool, 0); MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren" "t setup"); static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait); /* * u132_module_lock exists to protect access to global variables * */ static struct mutex u132_module_lock; static int u132_exiting; static int u132_instances; static struct list_head u132_static_list; /* * end of the global variables protected by u132_module_lock */ static struct workqueue_struct *workqueue; #define MAX_U132_PORTS 7 #define MAX_U132_ADDRS 128 #define MAX_U132_UDEVS 4 #define MAX_U132_ENDPS 100 #define MAX_U132_RINGS 4 static const char *cc_to_text[16] = { "No Error ", "CRC Error ", "Bit Stuff ", "Data Togg ", "Stall ", "DevNotResp ", "PIDCheck 
", "UnExpPID ", "DataOver ", "DataUnder ", "(for hw) ", "(for hw) ", "BufferOver ", "BuffUnder ", "(for HCD) ", "(for HCD) " }; struct u132_port { struct u132 *u132; int reset; int enable; int power; int Status; }; struct u132_addr { u8 address; }; struct u132_udev { struct kref kref; struct usb_device *usb_device; u8 enumeration; u8 udev_number; u8 usb_addr; u8 portnumber; u8 endp_number_in[16]; u8 endp_number_out[16]; }; #define ENDP_QUEUE_SHIFT 3 #define ENDP_QUEUE_SIZE (1<<ENDP_QUEUE_SHIFT) #define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE-1) struct u132_urbq { struct list_head urb_more; struct urb *urb; }; struct u132_spin { spinlock_t slock; }; struct u132_endp { struct kref kref; u8 udev_number; u8 endp_number; u8 usb_addr; u8 usb_endp; struct u132 *u132; struct list_head endp_ring; struct u132_ring *ring; unsigned toggle_bits:2; unsigned active:1; unsigned delayed:1; unsigned input:1; unsigned output:1; unsigned pipetype:2; unsigned dequeueing:1; unsigned edset_flush:1; unsigned spare_bits:14; unsigned long jiffies; struct usb_host_endpoint *hep; struct u132_spin queue_lock; u16 queue_size; u16 queue_last; u16 queue_next; struct urb *urb_list[ENDP_QUEUE_SIZE]; struct list_head urb_more; struct delayed_work scheduler; }; struct u132_ring { unsigned in_use:1; unsigned length:7; u8 number; struct u132 *u132; struct u132_endp *curr_endp; struct delayed_work scheduler; }; struct u132 { struct kref kref; struct list_head u132_list; struct mutex sw_lock; struct mutex scheduler_lock; struct u132_platform_data *board; struct platform_device *platform_dev; struct u132_ring ring[MAX_U132_RINGS]; int sequence_num; int going; int power; int reset; int num_ports; u32 hc_control; u32 hc_fminterval; u32 hc_roothub_status; u32 hc_roothub_a; u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; int flags; unsigned long next_statechange; struct delayed_work monitor; int num_endpoints; struct u132_addr addr[MAX_U132_ADDRS]; struct u132_udev udev[MAX_U132_UDEVS]; struct u132_port 
port[MAX_U132_PORTS]; struct u132_endp *endp[MAX_U132_ENDPS]; }; /* * these cannot be inlines because we need the structure offset!! * Does anyone have a better way????? */ #define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \ offsetof(struct ohci_regs, member), 0, data); #define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \ offsetof(struct ohci_regs, member), 0, data); #define u132_read_pcimem(u132, member, data) \ usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \ ohci_regs, member), 0, data); #define u132_write_pcimem(u132, member, data) \ usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \ ohci_regs, member), 0, data); static inline struct u132 *udev_to_u132(struct u132_udev *udev) { u8 udev_number = udev->udev_number; return container_of(udev, struct u132, udev[udev_number]); } static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd) { return (struct u132 *)(hcd->hcd_priv); } static inline struct usb_hcd *u132_to_hcd(struct u132 *u132) { return container_of((void *)u132, struct usb_hcd, hcd_priv); } static inline void u132_disable(struct u132 *u132) { u132_to_hcd(u132)->state = HC_STATE_HALT; } #define kref_to_u132(d) container_of(d, struct u132, kref) #define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref) #define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref) #include "../misc/usb_u132.h" static const char hcd_name[] = "u132_hcd"; #define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \ USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \ USB_PORT_STAT_C_RESET) << 16) static void u132_hcd_delete(struct kref *kref) { struct u132 *u132 = kref_to_u132(kref); struct platform_device *pdev = u132->platform_dev; struct usb_hcd *hcd = u132_to_hcd(u132); u132->going += 1; mutex_lock(&u132_module_lock); list_del_init(&u132->u132_list); u132_instances -= 1; mutex_unlock(&u132_module_lock); dev_warn(&u132->platform_dev->dev, "FREEING the 
hcd=%p and thus the u13" "2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev); usb_put_hcd(hcd); } static inline void u132_u132_put_kref(struct u132 *u132) { kref_put(&u132->kref, u132_hcd_delete); } static inline void u132_u132_init_kref(struct u132 *u132) { kref_init(&u132->kref); } static void u132_udev_delete(struct kref *kref) { struct u132_udev *udev = kref_to_u132_udev(kref); udev->udev_number = 0; udev->usb_device = NULL; udev->usb_addr = 0; udev->enumeration = 0; } static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev) { kref_put(&udev->kref, u132_udev_delete); } static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev) { kref_get(&udev->kref); } static inline void u132_udev_init_kref(struct u132 *u132, struct u132_udev *udev) { kref_init(&udev->kref); } static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring) { kref_put(&u132->kref, u132_hcd_delete); } static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring, unsigned int delta) { if (delta > 0) { if (queue_delayed_work(workqueue, &ring->scheduler, delta)) return; } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) return; kref_put(&u132->kref, u132_hcd_delete); } static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring, unsigned int delta) { kref_get(&u132->kref); u132_ring_requeue_work(u132, ring, delta); } static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring) { if (cancel_delayed_work(&ring->scheduler)) kref_put(&u132->kref, u132_hcd_delete); } static void u132_endp_delete(struct kref *kref) { struct u132_endp *endp = kref_to_u132_endp(kref); struct u132 *u132 = endp->u132; u8 usb_addr = endp->usb_addr; u8 usb_endp = endp->usb_endp; u8 address = u132->addr[usb_addr].address; struct u132_udev *udev = &u132->udev[address]; u8 endp_number = endp->endp_number; struct usb_host_endpoint *hep = endp->hep; struct u132_ring *ring = endp->ring; struct list_head 
*head = &endp->endp_ring; ring->length -= 1; if (endp == ring->curr_endp) { if (list_empty(head)) { ring->curr_endp = NULL; list_del(head); } else { struct u132_endp *next_endp = list_entry(head->next, struct u132_endp, endp_ring); ring->curr_endp = next_endp; list_del(head); } } else list_del(head); if (endp->input) { udev->endp_number_in[usb_endp] = 0; u132_udev_put_kref(u132, udev); } if (endp->output) { udev->endp_number_out[usb_endp] = 0; u132_udev_put_kref(u132, udev); } u132->endp[endp_number - 1] = NULL; hep->hcpriv = NULL; kfree(endp); u132_u132_put_kref(u132); } static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp) { kref_put(&endp->kref, u132_endp_delete); } static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp) { kref_get(&endp->kref); } static inline void u132_endp_init_kref(struct u132 *u132, struct u132_endp *endp) { kref_init(&endp->kref); kref_get(&u132->kref); } static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, unsigned int delta) { if (queue_delayed_work(workqueue, &endp->scheduler, delta)) kref_get(&endp->kref); } static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) { if (cancel_delayed_work(&endp->scheduler)) kref_put(&endp->kref, u132_endp_delete); } static inline void u132_monitor_put_kref(struct u132 *u132) { kref_put(&u132->kref, u132_hcd_delete); } static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) { if (queue_delayed_work(workqueue, &u132->monitor, delta)) kref_get(&u132->kref); } static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) { if (!queue_delayed_work(workqueue, &u132->monitor, delta)) kref_put(&u132->kref, u132_hcd_delete); } static void u132_monitor_cancel_work(struct u132 *u132) { if (cancel_delayed_work(&u132->monitor)) kref_put(&u132->kref, u132_hcd_delete); } static int read_roothub_info(struct u132 *u132) { u32 revision; int retval; retval = u132_read_pcimem(u132, revision, 
&revision); if (retval) { dev_err(&u132->platform_dev->dev, "error %d accessing device co" "ntrol\n", retval); return retval; } else if ((revision & 0xFF) == 0x10) { } else if ((revision & 0xFF) == 0x11) { } else { dev_err(&u132->platform_dev->dev, "device revision is not valid" " %08X\n", revision); return -ENODEV; } retval = u132_read_pcimem(u132, control, &u132->hc_control); if (retval) { dev_err(&u132->platform_dev->dev, "error %d accessing device co" "ntrol\n", retval); return retval; } retval = u132_read_pcimem(u132, roothub.status, &u132->hc_roothub_status); if (retval) { dev_err(&u132->platform_dev->dev, "error %d accessing device re" "g roothub.status\n", retval); return retval; } retval = u132_read_pcimem(u132, roothub.a, &u132->hc_roothub_a); if (retval) { dev_err(&u132->platform_dev->dev, "error %d accessing device re" "g roothub.a\n", retval); return retval; } { int I = u132->num_ports; int i = 0; while (I-- > 0) { retval = u132_read_pcimem(u132, roothub.portstatus[i], &u132->hc_roothub_portstatus[i]); if (retval) { dev_err(&u132->platform_dev->dev, "error %d acc" "essing device roothub.portstatus[%d]\n" , retval, i); return retval; } else i += 1; } } return 0; } static void u132_hcd_monitor_work(struct work_struct *work) { struct u132 *u132 = container_of(work, struct u132, monitor.work); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); u132_monitor_put_kref(u132); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); u132_monitor_put_kref(u132); return; } else { int retval; mutex_lock(&u132->sw_lock); retval = read_roothub_info(u132); if (retval) { struct usb_hcd *hcd = u132_to_hcd(u132); u132_disable(u132); u132->going = 1; mutex_unlock(&u132->sw_lock); usb_hc_died(hcd); ftdi_elan_gone_away(u132->platform_dev); u132_monitor_put_kref(u132); return; } else { u132_monitor_requeue_work(u132, 500); mutex_unlock(&u132->sw_lock); return; } } } static void 
u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp, struct urb *urb, int status) { struct u132_ring *ring; unsigned long irqs; struct usb_hcd *hcd = u132_to_hcd(u132); urb->error_count = 0; spin_lock_irqsave(&endp->queue_lock.slock, irqs); usb_hcd_unlink_urb_from_ep(hcd, urb); endp->queue_next += 1; if (ENDP_QUEUE_SIZE > --endp->queue_size) { endp->active = 0; spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); } else { struct list_head *next = endp->urb_more.next; struct u132_urbq *urbq = list_entry(next, struct u132_urbq, urb_more); list_del(next); endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urbq->urb; endp->active = 0; spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); kfree(urbq); } mutex_lock(&u132->scheduler_lock); ring = endp->ring; ring->in_use = 0; u132_ring_cancel_work(u132, ring); u132_ring_queue_work(u132, ring, 0); mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); usb_hcd_giveback_urb(hcd, urb, status); } static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp, struct urb *urb, int status) { u132_endp_put_kref(u132, endp); } static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp, struct urb *urb, int status) { unsigned long irqs; struct usb_hcd *hcd = u132_to_hcd(u132); urb->error_count = 0; spin_lock_irqsave(&endp->queue_lock.slock, irqs); usb_hcd_unlink_urb_from_ep(hcd, urb); endp->queue_next += 1; if (ENDP_QUEUE_SIZE > --endp->queue_size) { endp->active = 0; spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); } else { struct list_head *next = endp->urb_more.next; struct u132_urbq *urbq = list_entry(next, struct u132_urbq, urb_more); list_del(next); endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urbq->urb; endp->active = 0; spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); kfree(urbq); } usb_hcd_giveback_urb(hcd, urb, status); } static inline int edset_input(struct u132 *u132, struct u132_ring *ring, struct u132_endp *endp, struct urb *urb, u8 
address, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, toggle_bits, callback); } static inline int edset_setup(struct u132 *u132, struct u132_ring *ring, struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, toggle_bits, callback); } static inline int edset_single(struct u132 *u132, struct u132_ring *ring, struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, toggle_bits, callback); } static inline int edset_output(struct u132 *u132, struct u132_ring *ring, struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits, void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null)) { return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, toggle_bits, callback); } /* * must not LOCK sw_lock * */ static void u132_hcd_interrupt_recv(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp 
*endp = data; struct u132 *u132 = endp->u132; u8 address = u132->addr[endp->usb_addr].address; struct u132_udev *udev = &u132->udev[address]; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { struct u132_ring *ring = endp->ring; u8 *u = urb->transfer_buffer + urb->actual_length; u8 *b = buf; int L = len; while (L-- > 0) *u++ = *b++; urb->actual_length += len; if ((condition_code == TD_CC_NOERROR) && (urb->transfer_buffer_length > urb->actual_length)) { endp->toggle_bits = toggle_bits; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 1 & toggle_bits); if (urb->actual_length > 0) { int retval; mutex_unlock(&u132->scheduler_lock); retval = edset_single(u132, ring, endp, urb, address, endp->toggle_bits, u132_hcd_interrupt_recv); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); } else { ring->in_use = 0; endp->active = 0; endp->jiffies = jiffies + msecs_to_jiffies(urb->interval); u132_ring_cancel_work(u132, ring); u132_ring_queue_work(u132, ring, 0); mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); } return; } else if ((condition_code == TD_DATAUNDERRUN) && ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) { endp->toggle_bits = toggle_bits; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 1 & toggle_bits); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } else { if (condition_code == TD_CC_NOERROR) { endp->toggle_bits = toggle_bits; 
usb_settoggle(udev->usb_device, endp->usb_endp, 0, 1 & toggle_bits); } else if (condition_code == TD_CC_STALL) { endp->toggle_bits = 0x2; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0); } else { endp->toggle_bits = 0x2; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0); dev_err(&u132->platform_dev->dev, "urb=%p givin" "g back INTERRUPT %s\n", urb, cc_to_text[condition_code]); } mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, cc_to_error[condition_code]); return; } } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_bulk_output_sent(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; u8 address = u132->addr[endp->usb_addr].address; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { struct u132_ring *ring = endp->ring; urb->actual_length += len; endp->toggle_bits = toggle_bits; if (urb->transfer_buffer_length > urb->actual_length) { int retval; mutex_unlock(&u132->scheduler_lock); retval = edset_output(u132, ring, endp, urb, address, endp->toggle_bits, u132_hcd_bulk_output_sent); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, 
retval); return; } else { mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_bulk_input_recv(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; u8 address = u132->addr[endp->usb_addr].address; struct u132_udev *udev = &u132->udev[address]; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { struct u132_ring *ring = endp->ring; u8 *u = urb->transfer_buffer + urb->actual_length; u8 *b = buf; int L = len; while (L-- > 0) *u++ = *b++; urb->actual_length += len; if ((condition_code == TD_CC_NOERROR) && (urb->transfer_buffer_length > urb->actual_length)) { int retval; endp->toggle_bits = toggle_bits; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 1 & toggle_bits); mutex_unlock(&u132->scheduler_lock); retval = usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, endp->toggle_bits, u132_hcd_bulk_input_recv); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } else if (condition_code == TD_CC_NOERROR) { endp->toggle_bits = 
toggle_bits; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 1 & toggle_bits); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, cc_to_error[condition_code]); return; } else if ((condition_code == TD_DATAUNDERRUN) && ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) { endp->toggle_bits = toggle_bits; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 1 & toggle_bits); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } else if (condition_code == TD_DATAUNDERRUN) { endp->toggle_bits = toggle_bits; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 1 & toggle_bits); dev_warn(&u132->platform_dev->dev, "urb=%p(SHORT NOT OK" ") giving back BULK IN %s\n", urb, cc_to_text[condition_code]); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } else if (condition_code == TD_CC_STALL) { endp->toggle_bits = 0x2; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, cc_to_error[condition_code]); return; } else { endp->toggle_bits = 0x2; usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0); dev_err(&u132->platform_dev->dev, "urb=%p giving back B" "ULK IN code=%d %s\n", urb, condition_code, cc_to_text[condition_code]); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, cc_to_error[condition_code]); return; } } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_configure_empty_sent(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has 
been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_configure_input_recv(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; u8 address = u132->addr[endp->usb_addr].address; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { struct u132_ring *ring = endp->ring; u8 *u = urb->transfer_buffer; u8 *b = buf; int L = len; while (L-- > 0) *u++ = *b++; urb->actual_length = len; if ((condition_code == TD_CC_NOERROR) || ((condition_code == TD_DATAUNDERRUN) && ((urb->transfer_flags & URB_SHORT_NOT_OK) 
== 0))) { int retval; mutex_unlock(&u132->scheduler_lock); retval = usb_ftdi_elan_edset_empty(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, 0x3, u132_hcd_configure_empty_sent); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } else if (condition_code == TD_CC_STALL) { mutex_unlock(&u132->scheduler_lock); dev_warn(&u132->platform_dev->dev, "giving back SETUP I" "NPUT STALL urb %p\n", urb); u132_hcd_giveback_urb(u132, endp, urb, cc_to_error[condition_code]); return; } else { mutex_unlock(&u132->scheduler_lock); dev_err(&u132->platform_dev->dev, "giving back SETUP IN" "PUT %s urb %p\n", cc_to_text[condition_code], urb); u132_hcd_giveback_urb(u132, endp, urb, cc_to_error[condition_code]); return; } } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_configure_empty_recv(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " 
"unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_configure_setup_sent(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; u8 address = u132->addr[endp->usb_addr].address; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { if (usb_pipein(urb->pipe)) { int retval; struct u132_ring *ring = endp->ring; mutex_unlock(&u132->scheduler_lock); retval = usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, 0, u132_hcd_configure_input_recv); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } else { int retval; struct u132_ring *ring = endp->ring; mutex_unlock(&u132->scheduler_lock); retval = usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, 0, u132_hcd_configure_empty_recv); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_enumeration_empty_recv(void *data, struct urb *urb, u8 *buf, int len, int 
toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; u8 address = u132->addr[endp->usb_addr].address; struct u132_udev *udev = &u132->udev[address]; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { u132->addr[0].address = 0; endp->usb_addr = udev->usb_addr; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_enumeration_address_sent(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", 
urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { int retval; struct u132_ring *ring = endp->ring; mutex_unlock(&u132->scheduler_lock); retval = usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp, urb, 0, endp->usb_endp, 0, u132_hcd_enumeration_empty_recv); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_initial_empty_sent(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_initial_input_recv(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int 
non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; u8 address = u132->addr[endp->usb_addr].address; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { int retval; struct u132_ring *ring = endp->ring; u8 *u = urb->transfer_buffer; u8 *b = buf; int L = len; while (L-- > 0) *u++ = *b++; urb->actual_length = len; mutex_unlock(&u132->scheduler_lock); retval = usb_ftdi_elan_edset_empty(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, 0x3, u132_hcd_initial_empty_sent); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf, int len, int toggle_bits, int error_count, int condition_code, int repeat_number, int halted, int skipped, int actual, int non_null) { struct u132_endp *endp = data; struct u132 *u132 = endp->u132; u8 address = u132->addr[endp->usb_addr].address; mutex_lock(&u132->scheduler_lock); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); mutex_unlock(&u132->scheduler_lock); u132_hcd_forget_urb(u132, endp, urb, -ENODEV); return; } else if (endp->dequeueing) { endp->dequeueing = 0; mutex_unlock(&u132->scheduler_lock); 
u132_hcd_giveback_urb(u132, endp, urb, -EINTR); return; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); return; } else if (!urb->unlinked) { int retval; struct u132_ring *ring = endp->ring; mutex_unlock(&u132->scheduler_lock); retval = usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp, urb, address, endp->usb_endp, 0, u132_hcd_initial_input_recv); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } else { dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p " "unlinked=%d\n", urb, urb->unlinked); mutex_unlock(&u132->scheduler_lock); u132_hcd_giveback_urb(u132, endp, urb, 0); return; } } /* * this work function is only executed from the work queue * */ static void u132_hcd_ring_work_scheduler(struct work_struct *work) { struct u132_ring *ring = container_of(work, struct u132_ring, scheduler.work); struct u132 *u132 = ring->u132; mutex_lock(&u132->scheduler_lock); if (ring->in_use) { mutex_unlock(&u132->scheduler_lock); u132_ring_put_kref(u132, ring); return; } else if (ring->curr_endp) { struct u132_endp *last_endp = ring->curr_endp; struct list_head *scan; struct list_head *head = &last_endp->endp_ring; unsigned long wakeup = 0; list_for_each(scan, head) { struct u132_endp *endp = list_entry(scan, struct u132_endp, endp_ring); if (endp->queue_next == endp->queue_last) { } else if ((endp->delayed == 0) || time_after_eq(jiffies, endp->jiffies)) { ring->curr_endp = endp; u132_endp_cancel_work(u132, last_endp); u132_endp_queue_work(u132, last_endp, 0); mutex_unlock(&u132->scheduler_lock); u132_ring_put_kref(u132, ring); return; } else { unsigned long delta = endp->jiffies - jiffies; if (delta > wakeup) wakeup = delta; } } if (last_endp->queue_next == last_endp->queue_last) { } else if ((last_endp->delayed == 0) || time_after_eq(jiffies, last_endp->jiffies)) { 
u132_endp_cancel_work(u132, last_endp); u132_endp_queue_work(u132, last_endp, 0); mutex_unlock(&u132->scheduler_lock); u132_ring_put_kref(u132, ring); return; } else { unsigned long delta = last_endp->jiffies - jiffies; if (delta > wakeup) wakeup = delta; } if (wakeup > 0) { u132_ring_requeue_work(u132, ring, wakeup); mutex_unlock(&u132->scheduler_lock); return; } else { mutex_unlock(&u132->scheduler_lock); u132_ring_put_kref(u132, ring); return; } } else { mutex_unlock(&u132->scheduler_lock); u132_ring_put_kref(u132, ring); return; } } static void u132_hcd_endp_work_scheduler(struct work_struct *work) { struct u132_ring *ring; struct u132_endp *endp = container_of(work, struct u132_endp, scheduler.work); struct u132 *u132 = endp->u132; mutex_lock(&u132->scheduler_lock); ring = endp->ring; if (endp->edset_flush) { endp->edset_flush = 0; if (endp->dequeueing) usb_ftdi_elan_edset_flush(u132->platform_dev, ring->number, endp); mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); return; } else if (endp->active) { mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); return; } else if (ring->in_use) { mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); return; } else if (endp->queue_next == endp->queue_last) { mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); return; } else if (endp->pipetype == PIPE_INTERRUPT) { u8 address = u132->addr[endp->usb_addr].address; if (ring->in_use) { mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); return; } else { int retval; struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]; endp->active = 1; ring->curr_endp = endp; ring->in_use = 1; mutex_unlock(&u132->scheduler_lock); retval = edset_single(u132, ring, endp, urb, address, endp->toggle_bits, u132_hcd_interrupt_recv); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } } else if (endp->pipetype == PIPE_CONTROL) { u8 address = u132->addr[endp->usb_addr].address; 
if (ring->in_use) { mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); return; } else if (address == 0) { int retval; struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]; endp->active = 1; ring->curr_endp = endp; ring->in_use = 1; mutex_unlock(&u132->scheduler_lock); retval = edset_setup(u132, ring, endp, urb, address, 0x2, u132_hcd_initial_setup_sent); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } else if (endp->usb_addr == 0) { int retval; struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]; endp->active = 1; ring->curr_endp = endp; ring->in_use = 1; mutex_unlock(&u132->scheduler_lock); retval = edset_setup(u132, ring, endp, urb, 0, 0x2, u132_hcd_enumeration_address_sent); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } else { int retval; struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]; address = u132->addr[endp->usb_addr].address; endp->active = 1; ring->curr_endp = endp; ring->in_use = 1; mutex_unlock(&u132->scheduler_lock); retval = edset_setup(u132, ring, endp, urb, address, 0x2, u132_hcd_configure_setup_sent); if (retval != 0) u132_hcd_giveback_urb(u132, endp, urb, retval); return; } } else { if (endp->input) { u8 address = u132->addr[endp->usb_addr].address; if (ring->in_use) { mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); return; } else { int retval; struct urb *urb = endp->urb_list[ ENDP_QUEUE_MASK & endp->queue_next]; endp->active = 1; ring->curr_endp = endp; ring->in_use = 1; mutex_unlock(&u132->scheduler_lock); retval = edset_input(u132, ring, endp, urb, address, endp->toggle_bits, u132_hcd_bulk_input_recv); if (retval == 0) { } else u132_hcd_giveback_urb(u132, endp, urb, retval); return; } } else { /* output pipe */ u8 address = u132->addr[endp->usb_addr].address; if (ring->in_use) { mutex_unlock(&u132->scheduler_lock); u132_endp_put_kref(u132, endp); return; } else { int retval; struct urb *urb = 
endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next];
				/* claim the ring for this bulk OUT transfer;
				 * a failed edset submit completes the URB
				 * with the error */
				endp->active = 1;
				ring->curr_endp = endp;
				ring->in_use = 1;
				mutex_unlock(&u132->scheduler_lock);
				retval = edset_output(u132, ring, endp, urb,
					address, endp->toggle_bits,
					u132_hcd_bulk_output_sent);
				if (retval == 0) {
				} else
					u132_hcd_giveback_urb(u132, endp, urb,
						retval);
				return;
			}
		}
	}
}

#ifdef CONFIG_PM
/* Record the software view of a root-hub port's power state. */
static void port_power(struct u132 *u132, int pn, int is_on)
{
	u132->port[pn].power = is_on;
}

#endif

/*
 * Track overall bus power.  Powering off also marks the HCD halted;
 * powering on is a no-op if power is already up.
 */
static void u132_power(struct u132 *u132, int is_on)
{
	struct usb_hcd *hcd = u132_to_hcd(u132);
	/* hub is inactive unless the port is powered */
	if (is_on) {
		if (u132->power)
			return;
		u132->power = 1;
	} else {
		u132->power = 0;
		hcd->state = HC_STATE_HALT;
	}
}

/*
 * Reprogram the OHCI frame-timing registers from the cached
 * u132->hc_fminterval, toggling the FIT bit on the write.
 * Returns 0 or the first failing register-access error code.
 *
 * NOTE(review): u132_read_pcimem()/u132_write_pcimem() appear to take
 * the ohci_regs member name as a macro argument, so the "fminterval"
 * register token below is distinct from the local variable of the
 * same name that receives its value — confirm against the macro
 * definitions elsewhere in this file.
 */
static int u132_periodic_reinit(struct u132 *u132)
{
	int retval;
	u32 fi = u132->hc_fminterval & 0x03fff;
	u32 fit;
	u32 fminterval;

	retval = u132_read_pcimem(u132, fminterval, &fminterval);
	if (retval)
		return retval;
	fit = fminterval & FIT;
	retval = u132_write_pcimem(u132, fminterval,
		(fit ^ FIT) | u132->hc_fminterval);
	if (retval)
		return retval;
	/* PeriodicStart = 90% of the frame interval */
	retval = u132_write_pcimem(u132, periodicstart,
		((9 * fi) / 10) & 0x3fff);
	if (retval)
		return retval;
	return 0;
}

/* Human-readable name for an OHCI functional state (control HCFS field). */
static char *hcfs2string(int state)
{
	switch (state) {
	case OHCI_USB_RESET:
		return "reset";
	case OHCI_USB_RESUME:
		return "resume";
	case OHCI_USB_OPER:
		return "operational";
	case OHCI_USB_SUSPEND:
		return "suspend";
	}
	return "?";
}

/*
 * One-time controller initialisation: disable the controller, mask
 * all interrupts, and discover the root-hub port count from
 * roothub.a if it is not yet known.
 * Returns 0, a register-access error, or -EINVAL if the reported
 * port count exceeds MAX_U132_PORTS.
 */
static int u132_init(struct u132 *u132)
{
	int retval;
	u32 control;

	u132_disable(u132);
	u132->next_statechange = jiffies;
	/* mask all interrupt sources before touching anything else */
	retval = u132_write_pcimem(u132, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	if (u132->num_ports == 0) {
		u32 rh_a = -1;
		retval = u132_read_pcimem(u132, roothub.a, &rh_a);
		if (retval)
			return retval;
		u132->num_ports = rh_a & RH_A_NDP;
		retval = read_roothub_info(u132);
		if (retval)
			return retval;
	}
	if (u132->num_ports > MAX_U132_PORTS)
		return -EINVAL;
	return 0;
}

/* Start an
OHCI controller, set the BUS operational * resets USB and controller * enable interrupts */ static int u132_run(struct u132 *u132) { int retval; u32 control; u32 status; u32 fminterval; u32 periodicstart; u32 cmdstatus; u32 roothub_a; int mask = OHCI_INTR_INIT; int first = u132->hc_fminterval == 0; int sleep_time = 0; int reset_timeout = 30; /* ... allow extra time */ u132_disable(u132); if (first) { u32 temp; retval = u132_read_pcimem(u132, fminterval, &temp); if (retval) return retval; u132->hc_fminterval = temp & 0x3fff; u132->hc_fminterval |= FSMP(u132->hc_fminterval) << 16; } retval = u132_read_pcimem(u132, control, &u132->hc_control); if (retval) return retval; dev_info(&u132->platform_dev->dev, "resetting from state '%s', control " "= %08X\n", hcfs2string(u132->hc_control & OHCI_CTRL_HCFS), u132->hc_control); switch (u132->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_OPER: sleep_time = 0; break; case OHCI_USB_SUSPEND: case OHCI_USB_RESUME: u132->hc_control &= OHCI_CTRL_RWC; u132->hc_control |= OHCI_USB_RESUME; sleep_time = 10; break; default: u132->hc_control &= OHCI_CTRL_RWC; u132->hc_control |= OHCI_USB_RESET; sleep_time = 50; break; } retval = u132_write_pcimem(u132, control, u132->hc_control); if (retval) return retval; retval = u132_read_pcimem(u132, control, &control); if (retval) return retval; msleep(sleep_time); retval = u132_read_pcimem(u132, roothub.a, &roothub_a); if (retval) return retval; if (!(roothub_a & RH_A_NPS)) { int temp; /* power down each port */ for (temp = 0; temp < u132->num_ports; temp++) { retval = u132_write_pcimem(u132, roothub.portstatus[temp], RH_PS_LSDA); if (retval) return retval; } } retval = u132_read_pcimem(u132, control, &control); if (retval) return retval; retry: retval = u132_read_pcimem(u132, cmdstatus, &status); if (retval) return retval; retval = u132_write_pcimem(u132, cmdstatus, OHCI_HCR); if (retval) return retval; extra: { retval = u132_read_pcimem(u132, cmdstatus, &status); if (retval) return retval; if (0 != 
(status & OHCI_HCR)) { if (--reset_timeout == 0) { dev_err(&u132->platform_dev->dev, "USB HC reset" " timed out!\n"); return -ENODEV; } else { msleep(5); goto extra; } } } if (u132->flags & OHCI_QUIRK_INITRESET) { retval = u132_write_pcimem(u132, control, u132->hc_control); if (retval) return retval; retval = u132_read_pcimem(u132, control, &control); if (retval) return retval; } retval = u132_write_pcimem(u132, ed_controlhead, 0x00000000); if (retval) return retval; retval = u132_write_pcimem(u132, ed_bulkhead, 0x11000000); if (retval) return retval; retval = u132_write_pcimem(u132, hcca, 0x00000000); if (retval) return retval; retval = u132_periodic_reinit(u132); if (retval) return retval; retval = u132_read_pcimem(u132, fminterval, &fminterval); if (retval) return retval; retval = u132_read_pcimem(u132, periodicstart, &periodicstart); if (retval) return retval; if (0 == (fminterval & 0x3fff0000) || 0 == periodicstart) { if (!(u132->flags & OHCI_QUIRK_INITRESET)) { u132->flags |= OHCI_QUIRK_INITRESET; goto retry; } else dev_err(&u132->platform_dev->dev, "init err(%08x %04x)" "\n", fminterval, periodicstart); } /* start controller operations */ u132->hc_control &= OHCI_CTRL_RWC; u132->hc_control |= OHCI_CONTROL_INIT | OHCI_CTRL_BLE | OHCI_USB_OPER; retval = u132_write_pcimem(u132, control, u132->hc_control); if (retval) return retval; retval = u132_write_pcimem(u132, cmdstatus, OHCI_BLF); if (retval) return retval; retval = u132_read_pcimem(u132, cmdstatus, &cmdstatus); if (retval) return retval; retval = u132_read_pcimem(u132, control, &control); if (retval) return retval; u132_to_hcd(u132)->state = HC_STATE_RUNNING; retval = u132_write_pcimem(u132, roothub.status, RH_HS_DRWE); if (retval) return retval; retval = u132_write_pcimem(u132, intrstatus, mask); if (retval) return retval; retval = u132_write_pcimem(u132, intrdisable, OHCI_INTR_MIE | OHCI_INTR_OC | OHCI_INTR_RHSC | OHCI_INTR_FNO | OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_SF | OHCI_INTR_WDH | 
OHCI_INTR_SO);
	if (retval)
		return retval;
	/* handle root hub init quirks ... */
	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
	if (retval)
		return retval;
	roothub_a &= ~(RH_A_PSM | RH_A_OCPM);
	if (u132->flags & OHCI_QUIRK_SUPERIO) {
		/* quirk: set NOCP, clear POTPGT and NPS in roothub.a */
		roothub_a |= RH_A_NOCP;
		roothub_a &= ~(RH_A_POTPGT | RH_A_NPS);
		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
		if (retval)
			return retval;
	} else if ((u132->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
		/* quirk: disable per-port power switching entirely */
		roothub_a |= RH_A_NPS;
		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
		if (retval)
			return retval;
	}
	retval = u132_write_pcimem(u132, roothub.status, RH_HS_LPSC);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, roothub.b,
		(roothub_a & RH_A_NPS) ? 0 : RH_B_PPCM);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	/* delay derived from roothub.a bits 23.. (power-on-to-good time)
	 * — TODO confirm field semantics against the OHCI spec */
	mdelay((roothub_a >> 23) & 0x1fe);
	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
	return 0;
}

/*
 * hc_driver .stop callback: power the hub down under sw_lock, unless
 * the device has been removed (u132->going > 1) or is being removed
 * (u132->going > 0), in which case only an error is logged.
 */
static void u132_hcd_stop(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p) has b"
			"een removed %d\n", u132, hcd, u132->going);
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
			"ed\n", hcd);
	} else {
		mutex_lock(&u132->sw_lock);
		msleep(100);
		u132_power(u132, 0);
		mutex_unlock(&u132->sw_lock);
	}
}

/*
 * hc_driver .start callback: select chipset quirk flags from the
 * PCI vendor/device ids carried in the platform data, then bring the
 * controller up with u132_run().  On u132_run() failure the
 * controller is disabled and marked as going away.
 * Returns 0, -ENODEV, -ESHUTDOWN, or the u132_run() error.
 */
static int u132_hcd_start(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else if (hcd->self.controller) {
		int retval;
		struct platform_device *pdev =
			to_platform_device(hcd->self.controller);
		u16 vendor = ((struct u132_platform_data *)
			(pdev->dev.platform_data))->vendor;
		u16 device = ((struct u132_platform_data *)
			(pdev->dev.platform_data))->device;
		mutex_lock(&u132->sw_lock);
msleep(10); if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) { u132->flags = OHCI_QUIRK_AMD756; } else if (vendor == PCI_VENDOR_ID_OPTI && device == 0xc861) { dev_err(&u132->platform_dev->dev, "WARNING: OPTi workar" "ounds unavailable\n"); } else if (vendor == PCI_VENDOR_ID_COMPAQ && device == 0xa0f8) u132->flags |= OHCI_QUIRK_ZFMICRO; retval = u132_run(u132); if (retval) { u132_disable(u132); u132->going = 1; } msleep(100); mutex_unlock(&u132->sw_lock); return retval; } else { dev_err(&u132->platform_dev->dev, "platform_device missing\n"); return -ENODEV; } } static int u132_hcd_reset(struct usb_hcd *hcd) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); return -ESHUTDOWN; } else { int retval; mutex_lock(&u132->sw_lock); retval = u132_init(u132); if (retval) { u132_disable(u132); u132->going = 1; } mutex_unlock(&u132->sw_lock); return retval; } } static int create_endpoint_and_queue_int(struct u132 *u132, struct u132_udev *udev, struct urb *urb, struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address, gfp_t mem_flags) { struct u132_ring *ring; unsigned long irqs; int rc; u8 endp_number; struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags); if (!endp) return -ENOMEM; spin_lock_init(&endp->queue_lock.slock); spin_lock_irqsave(&endp->queue_lock.slock, irqs); rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb); if (rc) { spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); kfree(endp); return rc; } endp_number = ++u132->num_endpoints; urb->ep->hcpriv = u132->endp[endp_number - 1] = endp; INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); INIT_LIST_HEAD(&endp->urb_more); ring = endp->ring = &u132->ring[0]; if (ring->curr_endp) { list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring); } else { 
INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	endp->pipetype = usb_pipetype(urb->pipe);
	u132_endp_init_kref(u132, endp);
	/* direction selects which per-device endpoint-number table is
	 * filled in; both directions start with toggle_bits = 0x2 and a
	 * cleared host-side toggle */
	if (usb_pipein(urb->pipe)) {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
		endp->input = 1;
		endp->output = 0;
		udev->endp_number_in[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	} else {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
		endp->input = 0;
		endp->output = 1;
		udev->endp_number_out[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	}
	urb->hcpriv = u132;
	/* interrupt URBs are scheduled after their polling interval */
	endp->delayed = 1;
	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
	endp->udev_number = address;
	endp->usb_addr = usb_addr;
	endp->usb_endp = usb_endp;
	endp->queue_size = 1;
	endp->queue_last = 0;
	endp->queue_next = 0;
	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval));
	return 0;
}

/*
 * Queue an interrupt URB on an endpoint that already exists.  The
 * first ENDP_QUEUE_SIZE URBs sit in the fixed urb_list ring; overflow
 * URBs are chained onto endp->urb_more via a GFP_ATOMIC allocation.
 * Returns 0 or -ENOMEM (queue_size is rolled back on failure).
 * NOTE(review): presumably called with the endpoint's queue lock held,
 * matching the create_* counterparts — verify against the caller.
 */
static int queue_int_on_old_endpoint(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp, u8 address)
{
	urb->hcpriv = u132;
	endp->delayed = 1;
	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
	if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	} else {
		struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
			GFP_ATOMIC);
		if (urbq == NULL) {
			endp->queue_size -= 1;
			return -ENOMEM;
		} else {
			list_add_tail(&urbq->urb_more, &endp->urb_more);
			urbq->urb = urb;
		}
	}
	return 0;
}

/*
 * Allocate and initialise a new endpoint for a bulk URB, link it onto
 * the appropriate bulk ring (ring 3 for IN, ring 2 for OUT), queue the
 * URB and kick the endpoint work scheduler.
 * Returns 0, -ENOMEM, or the usb_hcd_link_urb_to_ep() error.
 */
static int create_endpoint_and_queue_bulk(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
	gfp_t mem_flags)
{
	int ring_number;
	struct
u132_ring *ring; unsigned long irqs; int rc; u8 endp_number; struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags); if (!endp) return -ENOMEM; spin_lock_init(&endp->queue_lock.slock); spin_lock_irqsave(&endp->queue_lock.slock, irqs); rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb); if (rc) { spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); kfree(endp); return rc; } endp_number = ++u132->num_endpoints; urb->ep->hcpriv = u132->endp[endp_number - 1] = endp; INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); INIT_LIST_HEAD(&endp->urb_more); endp->dequeueing = 0; endp->edset_flush = 0; endp->active = 0; endp->delayed = 0; endp->endp_number = endp_number; endp->u132 = u132; endp->hep = urb->ep; endp->pipetype = usb_pipetype(urb->pipe); u132_endp_init_kref(u132, endp); if (usb_pipein(urb->pipe)) { endp->toggle_bits = 0x2; usb_settoggle(udev->usb_device, usb_endp, 0, 0); ring_number = 3; endp->input = 1; endp->output = 0; udev->endp_number_in[usb_endp] = endp_number; u132_udev_get_kref(u132, udev); } else { endp->toggle_bits = 0x2; usb_settoggle(udev->usb_device, usb_endp, 1, 0); ring_number = 2; endp->input = 0; endp->output = 1; udev->endp_number_out[usb_endp] = endp_number; u132_udev_get_kref(u132, udev); } ring = endp->ring = &u132->ring[ring_number - 1]; if (ring->curr_endp) { list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring); } else { INIT_LIST_HEAD(&endp->endp_ring); ring->curr_endp = endp; } ring->length += 1; urb->hcpriv = u132; endp->udev_number = address; endp->usb_addr = usb_addr; endp->usb_endp = usb_endp; endp->queue_size = 1; endp->queue_last = 0; endp->queue_next = 0; endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); u132_endp_queue_work(u132, endp, 0); return 0; } static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev, struct urb *urb, struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr, u8 usb_endp, u8 
address)
{
	urb->hcpriv = u132;
	/* fixed urb_list ring first, overflow chained onto urb_more */
	if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	} else {
		struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
			GFP_ATOMIC);
		if (urbq == NULL) {
			endp->queue_size -= 1;
			return -ENOMEM;
		} else {
			list_add_tail(&urbq->urb_more, &endp->urb_more);
			urbq->urb = urb;
		}
	}
	return 0;
}

/*
 * Allocate and initialise a new endpoint for a control URB on ring 0
 * and queue the URB.  Control endpoints are marked both input and
 * output.  The usb_addr == 0 path (default address, enumeration) also
 * initialises the udev kref; the usb_addr > 0 path marks the device
 * as enumerated (udev->enumeration = 2).
 * Returns 0, -ENOMEM, or the usb_hcd_link_urb_to_ep() error.
 */
static int create_endpoint_and_queue_control(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp),
		mem_flags);

	if (!endp)
		return -ENOMEM;

	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}

	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	/* control transfers always use ring 0 */
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	u132_endp_init_kref(u132, endp);
	u132_endp_get_kref(u132, endp);
	if (usb_addr == 0) {
		/* default-address (enumeration) control endpoint */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_init_kref(u132, udev);
		u132_udev_get_kref(u132, udev);
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		urb->hcpriv = u132;
endp->queue_size = 1; endp->queue_last = 0; endp->queue_next = 0; endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); u132_endp_queue_work(u132, endp, 0); return 0; } else { /*(usb_addr > 0) */ u8 address = u132->addr[usb_addr].address; struct u132_udev *udev = &u132->udev[address]; endp->udev_number = address; endp->usb_addr = usb_addr; endp->usb_endp = usb_endp; endp->input = 1; endp->output = 1; endp->pipetype = usb_pipetype(urb->pipe); u132_udev_get_kref(u132, udev); udev->enumeration = 2; udev->endp_number_in[usb_endp] = endp_number; udev->endp_number_out[usb_endp] = endp_number; urb->hcpriv = u132; endp->queue_size = 1; endp->queue_last = 0; endp->queue_next = 0; endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); u132_endp_queue_work(u132, endp, 0); return 0; } } static int queue_control_on_old_endpoint(struct u132 *u132, struct urb *urb, struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr, u8 usb_endp) { if (usb_addr == 0) { if (usb_pipein(urb->pipe)) { urb->hcpriv = u132; if (endp->queue_size++ < ENDP_QUEUE_SIZE) { endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; } else { struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC); if (urbq == NULL) { endp->queue_size -= 1; return -ENOMEM; } else { list_add_tail(&urbq->urb_more, &endp->urb_more); urbq->urb = urb; } } return 0; } else { /* usb_pipeout(urb->pipe) */ struct u132_addr *addr = &u132->addr[usb_dev->devnum]; int I = MAX_U132_UDEVS; int i = 0; while (--I > 0) { struct u132_udev *udev = &u132->udev[++i]; if (udev->usb_device) { continue; } else { udev->enumeration = 1; u132->addr[0].address = i; endp->udev_number = i; udev->udev_number = i; udev->usb_addr = usb_dev->devnum; u132_udev_init_kref(u132, udev); udev->endp_number_in[usb_endp] = endp->endp_number; u132_udev_get_kref(u132, udev); udev->endp_number_out[usb_endp] = endp->endp_number; 
udev->usb_device = usb_dev; ((u8 *) (urb->setup_packet))[2] = addr->address = i; u132_udev_get_kref(u132, udev); break; } } if (I == 0) { dev_err(&u132->platform_dev->dev, "run out of d" "evice space\n"); return -EINVAL; } urb->hcpriv = u132; if (endp->queue_size++ < ENDP_QUEUE_SIZE) { endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; } else { struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC); if (urbq == NULL) { endp->queue_size -= 1; return -ENOMEM; } else { list_add_tail(&urbq->urb_more, &endp->urb_more); urbq->urb = urb; } } return 0; } } else { /*(usb_addr > 0) */ u8 address = u132->addr[usb_addr].address; struct u132_udev *udev = &u132->udev[address]; urb->hcpriv = u132; if (udev->enumeration != 2) udev->enumeration = 2; if (endp->queue_size++ < ENDP_QUEUE_SIZE) { endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; } else { struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC); if (urbq == NULL) { endp->queue_size -= 1; return -ENOMEM; } else { list_add_tail(&urbq->urb_more, &endp->urb_more); urbq->urb = urb; } } return 0; } } static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { struct u132 *u132 = hcd_to_u132(hcd); if (irqs_disabled()) { if (__GFP_WAIT & mem_flags) { printk(KERN_ERR "invalid context for function that migh" "t sleep\n"); return -EINVAL; } } if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed " "urb=%p\n", urb); return -ESHUTDOWN; } else { u8 usb_addr = usb_pipedevice(urb->pipe); u8 usb_endp = usb_pipeendpoint(urb->pipe); struct usb_device *usb_dev = urb->dev; if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { u8 address = u132->addr[usb_addr].address; struct u132_udev *udev = &u132->udev[address]; struct u132_endp *endp = urb->ep->hcpriv; urb->actual_length = 0; if (endp) { unsigned long irqs; int 
retval; spin_lock_irqsave(&endp->queue_lock.slock, irqs); retval = usb_hcd_link_urb_to_ep(hcd, urb); if (retval == 0) { retval = queue_int_on_old_endpoint( u132, udev, urb, usb_dev, endp, usb_addr, usb_endp, address); if (retval) usb_hcd_unlink_urb_from_ep( hcd, urb); } spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); if (retval) { return retval; } else { u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval)) ; return 0; } } else if (u132->num_endpoints == MAX_U132_ENDPS) { return -EINVAL; } else { /*(endp == NULL) */ return create_endpoint_and_queue_int(u132, udev, urb, usb_dev, usb_addr, usb_endp, address, mem_flags); } } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { dev_err(&u132->platform_dev->dev, "the hardware does no" "t support PIPE_ISOCHRONOUS\n"); return -EINVAL; } else if (usb_pipetype(urb->pipe) == PIPE_BULK) { u8 address = u132->addr[usb_addr].address; struct u132_udev *udev = &u132->udev[address]; struct u132_endp *endp = urb->ep->hcpriv; urb->actual_length = 0; if (endp) { unsigned long irqs; int retval; spin_lock_irqsave(&endp->queue_lock.slock, irqs); retval = usb_hcd_link_urb_to_ep(hcd, urb); if (retval == 0) { retval = queue_bulk_on_old_endpoint( u132, udev, urb, usb_dev, endp, usb_addr, usb_endp, address); if (retval) usb_hcd_unlink_urb_from_ep( hcd, urb); } spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); if (retval) { return retval; } else { u132_endp_queue_work(u132, endp, 0); return 0; } } else if (u132->num_endpoints == MAX_U132_ENDPS) { return -EINVAL; } else return create_endpoint_and_queue_bulk(u132, udev, urb, usb_dev, usb_addr, usb_endp, address, mem_flags); } else { struct u132_endp *endp = urb->ep->hcpriv; u16 urb_size = 8; u8 *b = urb->setup_packet; int i = 0; char data[30 * 3 + 4]; char *d = data; int m = (sizeof(data) - 1) / 3; int l = 0; data[0] = 0; while (urb_size-- > 0) { if (i > m) { } else if (i++ < m) { int w = sprintf(d, " %02X", *b++); d += w; l += w; } else d += sprintf(d, " .."); } if 
(endp) { unsigned long irqs; int retval; spin_lock_irqsave(&endp->queue_lock.slock, irqs); retval = usb_hcd_link_urb_to_ep(hcd, urb); if (retval == 0) { retval = queue_control_on_old_endpoint( u132, urb, usb_dev, endp, usb_addr, usb_endp); if (retval) usb_hcd_unlink_urb_from_ep( hcd, urb); } spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); if (retval) { return retval; } else { u132_endp_queue_work(u132, endp, 0); return 0; } } else if (u132->num_endpoints == MAX_U132_ENDPS) { return -EINVAL; } else return create_endpoint_and_queue_control(u132, urb, usb_dev, usb_addr, usb_endp, mem_flags); } } } static int dequeue_from_overflow_chain(struct u132 *u132, struct u132_endp *endp, struct urb *urb) { struct list_head *scan; struct list_head *head = &endp->urb_more; list_for_each(scan, head) { struct u132_urbq *urbq = list_entry(scan, struct u132_urbq, urb_more); if (urbq->urb == urb) { struct usb_hcd *hcd = u132_to_hcd(u132); list_del(scan); endp->queue_size -= 1; urb->error_count = 0; usb_hcd_giveback_urb(hcd, urb, 0); return 0; } else continue; } dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring" "[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X" "\n", urb, endp->endp_number, endp, endp->ring->number, endp->input ? 'I' : ' ', endp->output ? 'O' : ' ', endp->usb_endp, endp->usb_addr, endp->queue_size, endp->queue_next, endp->queue_last); return -EINVAL; } static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp, struct urb *urb, int status) { unsigned long irqs; int rc; spin_lock_irqsave(&endp->queue_lock.slock, irqs); rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status); if (rc) { spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); return rc; } if (endp->queue_size == 0) { dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]" "=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb, endp->endp_number, endp, endp->ring->number, endp->input ? 'I' : ' ', endp->output ? 
'O' : ' ', endp->usb_endp, endp->usb_addr); spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); return -EINVAL; } if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) { if (endp->active) { endp->dequeueing = 1; endp->edset_flush = 1; u132_endp_queue_work(u132, endp, 0); spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); return 0; } else { spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); u132_hcd_abandon_urb(u132, endp, urb, status); return 0; } } else { u16 queue_list = 0; u16 queue_size = endp->queue_size; u16 queue_scan = endp->queue_next; struct urb **urb_slot = NULL; while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) { if (urb == endp->urb_list[ENDP_QUEUE_MASK & ++queue_scan]) { urb_slot = &endp->urb_list[ENDP_QUEUE_MASK & queue_scan]; break; } else continue; } while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) { *urb_slot = endp->urb_list[ENDP_QUEUE_MASK & ++queue_scan]; urb_slot = &endp->urb_list[ENDP_QUEUE_MASK & queue_scan]; } if (urb_slot) { struct usb_hcd *hcd = u132_to_hcd(u132); usb_hcd_unlink_urb_from_ep(hcd, urb); endp->queue_size -= 1; if (list_empty(&endp->urb_more)) { spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); } else { struct list_head *next = endp->urb_more.next; struct u132_urbq *urbq = list_entry(next, struct u132_urbq, urb_more); list_del(next); *urb_slot = urbq->urb; spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); kfree(urbq); } urb->error_count = 0; usb_hcd_giveback_urb(hcd, urb, status); return 0; } else if (list_empty(&endp->urb_more)) { dev_err(&u132->platform_dev->dev, "urb=%p not found in " "endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr" "=%d size=%d next=%04X last=%04X\n", urb, endp->endp_number, endp, endp->ring->number, endp->input ? 'I' : ' ', endp->output ? 
'O' : ' ', endp->usb_endp, endp->usb_addr, endp->queue_size, endp->queue_next, endp->queue_last); spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); return -EINVAL; } else { int retval; usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb); retval = dequeue_from_overflow_chain(u132, endp, urb); spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); return retval; } } } static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 2) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else { u8 usb_addr = usb_pipedevice(urb->pipe); u8 usb_endp = usb_pipeendpoint(urb->pipe); u8 address = u132->addr[usb_addr].address; struct u132_udev *udev = &u132->udev[address]; if (usb_pipein(urb->pipe)) { u8 endp_number = udev->endp_number_in[usb_endp]; struct u132_endp *endp = u132->endp[endp_number - 1]; return u132_endp_urb_dequeue(u132, endp, urb, status); } else { u8 endp_number = udev->endp_number_out[usb_endp]; struct u132_endp *endp = u132->endp[endp_number - 1]; return u132_endp_urb_dequeue(u132, endp, urb, status); } } } static void u132_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 2) { dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p hep=%p" ") has been removed %d\n", u132, hcd, hep, u132->going); } else { struct u132_endp *endp = hep->hcpriv; if (endp) u132_endp_put_kref(u132, endp); } } static int u132_get_frame(struct usb_hcd *hcd) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); return -ESHUTDOWN; } else { int frame = 0; dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n"); msleep(100); return frame; } } static int u132_roothub_descriptor(struct 
u132 *u132, struct usb_hub_descriptor *desc) { int retval; u16 temp; u32 rh_a = -1; u32 rh_b = -1; retval = u132_read_pcimem(u132, roothub.a, &rh_a); if (retval) return retval; desc->bDescriptorType = 0x29; desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24; desc->bHubContrCurrent = 0; desc->bNbrPorts = u132->num_ports; temp = 1 + (u132->num_ports / 8); desc->bDescLength = 7 + 2 * temp; temp = 0; if (rh_a & RH_A_NPS) temp |= 0x0002; if (rh_a & RH_A_PSM) temp |= 0x0001; if (rh_a & RH_A_NOCP) temp |= 0x0010; else if (rh_a & RH_A_OCPM) temp |= 0x0008; desc->wHubCharacteristics = cpu_to_le16(temp); retval = u132_read_pcimem(u132, roothub.b, &rh_b); if (retval) return retval; memset(desc->u.hs.DeviceRemovable, 0xff, sizeof(desc->u.hs.DeviceRemovable)); desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR; if (u132->num_ports > 7) { desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8; desc->u.hs.DeviceRemovable[2] = 0xff; } else desc->u.hs.DeviceRemovable[1] = 0xff; return 0; } static int u132_roothub_status(struct u132 *u132, __le32 *desc) { u32 rh_status = -1; int ret_status = u132_read_pcimem(u132, roothub.status, &rh_status); *desc = cpu_to_le32(rh_status); return ret_status; } static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex) { if (wIndex == 0 || wIndex > u132->num_ports) { return -EINVAL; } else { int port = wIndex - 1; u32 rh_portstatus = -1; int ret_portstatus = u132_read_pcimem(u132, roothub.portstatus[port], &rh_portstatus); *desc = cpu_to_le32(rh_portstatus); if (*(u16 *) (desc + 2)) { dev_info(&u132->platform_dev->dev, "Port %d Status Chan" "ge = %08X\n", port, *desc); } return ret_portstatus; } } /* this timer value might be vendor-specific ... 
*/ #define PORT_RESET_HW_MSEC 10 #define PORT_RESET_MSEC 10 /* wrap-aware logic morphed from <linux/jiffies.h> */ #define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0) static int u132_roothub_portreset(struct u132 *u132, int port_index) { int retval; u32 fmnumber; u16 now; u16 reset_done; retval = u132_read_pcimem(u132, fmnumber, &fmnumber); if (retval) return retval; now = fmnumber; reset_done = now + PORT_RESET_MSEC; do { u32 portstat; do { retval = u132_read_pcimem(u132, roothub.portstatus[port_index], &portstat); if (retval) return retval; if (RH_PS_PRS & portstat) continue; else break; } while (tick_before(now, reset_done)); if (RH_PS_PRS & portstat) return -ENODEV; if (RH_PS_CCS & portstat) { if (RH_PS_PRSC & portstat) { retval = u132_write_pcimem(u132, roothub.portstatus[port_index], RH_PS_PRSC); if (retval) return retval; } } else break; /* start the next reset, sleep till it's probably done */ retval = u132_write_pcimem(u132, roothub.portstatus[port_index], RH_PS_PRS); if (retval) return retval; msleep(PORT_RESET_HW_MSEC); retval = u132_read_pcimem(u132, fmnumber, &fmnumber); if (retval) return retval; now = fmnumber; } while (tick_before(now, reset_done)); return 0; } static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue, u16 wIndex) { if (wIndex == 0 || wIndex > u132->num_ports) { return -EINVAL; } else { int retval; int port_index = wIndex - 1; struct u132_port *port = &u132->port[port_index]; port->Status &= ~(1 << wValue); switch (wValue) { case USB_PORT_FEAT_SUSPEND: retval = u132_write_pcimem(u132, roothub.portstatus[port_index], RH_PS_PSS); if (retval) return retval; return 0; case USB_PORT_FEAT_POWER: retval = u132_write_pcimem(u132, roothub.portstatus[port_index], RH_PS_PPS); if (retval) return retval; return 0; case USB_PORT_FEAT_RESET: retval = u132_roothub_portreset(u132, port_index); if (retval) return retval; return 0; default: return -EPIPE; } } } static int u132_roothub_clearportfeature(struct u132 *u132, u16 
wValue, u16 wIndex) { if (wIndex == 0 || wIndex > u132->num_ports) { return -EINVAL; } else { int port_index = wIndex - 1; u32 temp; int retval; struct u132_port *port = &u132->port[port_index]; port->Status &= ~(1 << wValue); switch (wValue) { case USB_PORT_FEAT_ENABLE: temp = RH_PS_CCS; break; case USB_PORT_FEAT_C_ENABLE: temp = RH_PS_PESC; break; case USB_PORT_FEAT_SUSPEND: temp = RH_PS_POCI; if ((u132->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_OPER) { dev_err(&u132->platform_dev->dev, "TODO resume_" "root_hub\n"); } break; case USB_PORT_FEAT_C_SUSPEND: temp = RH_PS_PSSC; break; case USB_PORT_FEAT_POWER: temp = RH_PS_LSDA; break; case USB_PORT_FEAT_C_CONNECTION: temp = RH_PS_CSC; break; case USB_PORT_FEAT_C_OVER_CURRENT: temp = RH_PS_OCIC; break; case USB_PORT_FEAT_C_RESET: temp = RH_PS_PRSC; break; default: return -EPIPE; } retval = u132_write_pcimem(u132, roothub.portstatus[port_index], temp); if (retval) return retval; return 0; } } /* the virtual root hub timer IRQ checks for hub status*/ static int u132_hub_status_data(struct usb_hcd *hcd, char *buf) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device hcd=%p has been remov" "ed %d\n", hcd, u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov" "ed\n", hcd); return -ESHUTDOWN; } else { int i, changed = 0, length = 1; if (u132->flags & OHCI_QUIRK_AMD756) { if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) { dev_err(&u132->platform_dev->dev, "bogus NDP, r" "ereads as NDP=%d\n", u132->hc_roothub_a & RH_A_NDP); goto done; } } if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC)) buf[0] = changed = 1; else buf[0] = 0; if (u132->num_ports > 7) { buf[1] = 0; length++; } for (i = 0; i < u132->num_ports; i++) { if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC | RH_PS_PRSC)) { changed = 1; if (i < 7) buf[0] |= 1 << (i + 1); else buf[1] |= 1 << (i - 7); 
continue; } if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS)) continue; if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS)) continue; } done: return changed ? length : 0; } } static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); return -ESHUTDOWN; } else { int retval = 0; mutex_lock(&u132->sw_lock); switch (typeReq) { case ClearHubFeature: switch (wValue) { case C_HUB_OVER_CURRENT: case C_HUB_LOCAL_POWER: break; default: goto stall; } break; case SetHubFeature: switch (wValue) { case C_HUB_OVER_CURRENT: case C_HUB_LOCAL_POWER: break; default: goto stall; } break; case ClearPortFeature:{ retval = u132_roothub_clearportfeature(u132, wValue, wIndex); if (retval) goto error; break; } case GetHubDescriptor:{ retval = u132_roothub_descriptor(u132, (struct usb_hub_descriptor *)buf); if (retval) goto error; break; } case GetHubStatus:{ retval = u132_roothub_status(u132, (__le32 *) buf); if (retval) goto error; break; } case GetPortStatus:{ retval = u132_roothub_portstatus(u132, (__le32 *) buf, wIndex); if (retval) goto error; break; } case SetPortFeature:{ retval = u132_roothub_setportfeature(u132, wValue, wIndex); if (retval) goto error; break; } default: goto stall; error: u132_disable(u132); u132->going = 1; break; stall: retval = -EPIPE; break; } mutex_unlock(&u132->sw_lock); return retval; } } static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); return -ESHUTDOWN; } else return 0; } #ifdef 
CONFIG_PM static int u132_bus_suspend(struct usb_hcd *hcd) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); return -ESHUTDOWN; } else return 0; } static int u132_bus_resume(struct usb_hcd *hcd) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); return -ESHUTDOWN; } else return 0; } #else #define u132_bus_suspend NULL #define u132_bus_resume NULL #endif static struct hc_driver u132_hc_driver = { .description = hcd_name, .hcd_priv_size = sizeof(struct u132), .irq = NULL, .flags = HCD_USB11 | HCD_MEMORY, .reset = u132_hcd_reset, .start = u132_hcd_start, .stop = u132_hcd_stop, .urb_enqueue = u132_urb_enqueue, .urb_dequeue = u132_urb_dequeue, .endpoint_disable = u132_endpoint_disable, .get_frame_number = u132_get_frame, .hub_status_data = u132_hub_status_data, .hub_control = u132_hub_control, .bus_suspend = u132_bus_suspend, .bus_resume = u132_bus_resume, .start_port_reset = u132_start_port_reset, }; /* * This function may be called by the USB core whilst the "usb_all_devices_rwsem" * is held for writing, thus this module must not call usb_remove_hcd() * synchronously - but instead should immediately stop activity to the * device and asynchronously call usb_remove_hcd() */ static int u132_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); if (hcd) { struct u132 *u132 = hcd_to_u132(hcd); if (u132->going++ > 1) { dev_err(&u132->platform_dev->dev, "already being remove" "d\n"); return -ENODEV; } else { int rings = MAX_U132_RINGS; int endps = MAX_U132_ENDPS; dev_err(&u132->platform_dev->dev, "removing device u132" ".%d\n", 
u132->sequence_num); msleep(100); mutex_lock(&u132->sw_lock); u132_monitor_cancel_work(u132); while (rings-- > 0) { struct u132_ring *ring = &u132->ring[rings]; u132_ring_cancel_work(u132, ring); } while (endps-- > 0) { struct u132_endp *endp = u132->endp[endps]; if (endp) u132_endp_cancel_work(u132, endp); } u132->going += 1; printk(KERN_INFO "removing device u132.%d\n", u132->sequence_num); mutex_unlock(&u132->sw_lock); usb_remove_hcd(hcd); u132_u132_put_kref(u132); return 0; } } else return 0; } static void u132_initialise(struct u132 *u132, struct platform_device *pdev) { int rings = MAX_U132_RINGS; int ports = MAX_U132_PORTS; int addrs = MAX_U132_ADDRS; int udevs = MAX_U132_UDEVS; int endps = MAX_U132_ENDPS; u132->board = pdev->dev.platform_data; u132->platform_dev = pdev; u132->power = 0; u132->reset = 0; mutex_init(&u132->sw_lock); mutex_init(&u132->scheduler_lock); while (rings-- > 0) { struct u132_ring *ring = &u132->ring[rings]; ring->u132 = u132; ring->number = rings + 1; ring->length = 0; ring->curr_endp = NULL; INIT_DELAYED_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler); } mutex_lock(&u132->sw_lock); INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work); while (ports-- > 0) { struct u132_port *port = &u132->port[ports]; port->u132 = u132; port->reset = 0; port->enable = 0; port->power = 0; port->Status = 0; } while (addrs-- > 0) { struct u132_addr *addr = &u132->addr[addrs]; addr->address = 0; } while (udevs-- > 0) { struct u132_udev *udev = &u132->udev[udevs]; int i = ARRAY_SIZE(udev->endp_number_in); int o = ARRAY_SIZE(udev->endp_number_out); udev->usb_device = NULL; udev->udev_number = 0; udev->usb_addr = 0; udev->portnumber = 0; while (i-- > 0) udev->endp_number_in[i] = 0; while (o-- > 0) udev->endp_number_out[o] = 0; } while (endps-- > 0) u132->endp[endps] = NULL; mutex_unlock(&u132->sw_lock); } static int u132_probe(struct platform_device *pdev) { struct usb_hcd *hcd; int retval; u32 control; u32 rh_a = -1; u32 num_ports; msleep(100); if 
(u132_exiting > 0) return -ENODEV; retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE); if (retval) return retval; retval = ftdi_read_pcimem(pdev, control, &control); if (retval) return retval; retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a); if (retval) return retval; num_ports = rh_a & RH_A_NDP; /* refuse to confuse usbcore */ if (pdev->dev.dma_mask) return -EINVAL; hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { printk(KERN_ERR "failed to create the usb hcd struct for U132\n" ); ftdi_elan_gone_away(pdev); return -ENOMEM; } else { struct u132 *u132 = hcd_to_u132(hcd); retval = 0; hcd->rsrc_start = 0; mutex_lock(&u132_module_lock); list_add_tail(&u132->u132_list, &u132_static_list); u132->sequence_num = ++u132_instances; mutex_unlock(&u132_module_lock); u132_u132_init_kref(u132); u132_initialise(u132, pdev); hcd->product_desc = "ELAN U132 Host Controller"; retval = usb_add_hcd(hcd, 0, 0); if (retval != 0) { dev_err(&u132->platform_dev->dev, "init error %d\n", retval); u132_u132_put_kref(u132); return retval; } else { u132_monitor_queue_work(u132, 100); return 0; } } } #ifdef CONFIG_PM /* * for this device there's no useful distinction between the controller * and its root hub, except that the root hub only gets direct PM calls * when CONFIG_PM_RUNTIME is enabled. 
*/ static int u132_suspend(struct platform_device *pdev, pm_message_t state) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); return -ESHUTDOWN; } else { int retval = 0, ports; switch (state.event) { case PM_EVENT_FREEZE: retval = u132_bus_suspend(hcd); break; case PM_EVENT_SUSPEND: case PM_EVENT_HIBERNATE: ports = MAX_U132_PORTS; while (ports-- > 0) { port_power(u132, ports, 0); } break; } return retval; } } static int u132_resume(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct u132 *u132 = hcd_to_u132(hcd); if (u132->going > 1) { dev_err(&u132->platform_dev->dev, "device has been removed %d\n" , u132->going); return -ENODEV; } else if (u132->going > 0) { dev_err(&u132->platform_dev->dev, "device is being removed\n"); return -ESHUTDOWN; } else { int retval = 0; if (!u132->port[0].power) { int ports = MAX_U132_PORTS; while (ports-- > 0) { port_power(u132, ports, 1); } retval = 0; } else { retval = u132_bus_resume(hcd); } return retval; } } #else #define u132_suspend NULL #define u132_resume NULL #endif /* * this driver is loaded explicitly by ftdi_u132 * * the platform_driver struct is static because it is per type of module */ static struct platform_driver u132_platform_driver = { .probe = u132_probe, .remove = u132_remove, .suspend = u132_suspend, .resume = u132_resume, .driver = { .name = (char *)hcd_name, .owner = THIS_MODULE, }, }; static int __init u132_hcd_init(void) { int retval; INIT_LIST_HEAD(&u132_static_list); u132_instances = 0; u132_exiting = 0; mutex_init(&u132_module_lock); if (usb_disabled()) return -ENODEV; printk(KERN_INFO "driver %s\n", hcd_name); workqueue = create_singlethread_workqueue("u132"); retval = platform_driver_register(&u132_platform_driver); 
return retval; } module_init(u132_hcd_init); static void __exit u132_hcd_exit(void) { struct u132 *u132; struct u132 *temp; mutex_lock(&u132_module_lock); u132_exiting += 1; mutex_unlock(&u132_module_lock); list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) { platform_device_unregister(u132->platform_dev); } platform_driver_unregister(&u132_platform_driver); printk(KERN_INFO "u132-hcd driver deregistered\n"); wait_event(u132_hcd_wait, u132_instances == 0); flush_workqueue(workqueue); destroy_workqueue(workqueue); } module_exit(u132_hcd_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:u132_hcd");
gpl-2.0
bemolxd/android_kernel_x2xtreme-test
drivers/video/sh_mobile_meram.c
2407
20674
/* * SuperH Mobile MERAM Driver for SuperH Mobile LCDC Driver * * Copyright (c) 2011 Damian Hobson-Garcia <dhobsong@igel.co.jp> * Takanari Hayama <taki@igel.co.jp> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/device.h> #include <linux/err.h> #include <linux/export.h> #include <linux/genalloc.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <video/sh_mobile_meram.h> /* ----------------------------------------------------------------------------- * MERAM registers */ #define MEVCR1 0x4 #define MEVCR1_RST (1 << 31) #define MEVCR1_WD (1 << 30) #define MEVCR1_AMD1 (1 << 29) #define MEVCR1_AMD0 (1 << 28) #define MEQSEL1 0x40 #define MEQSEL2 0x44 #define MExxCTL 0x400 #define MExxCTL_BV (1 << 31) #define MExxCTL_BSZ_SHIFT 28 #define MExxCTL_MSAR_MASK (0x7ff << MExxCTL_MSAR_SHIFT) #define MExxCTL_MSAR_SHIFT 16 #define MExxCTL_NXT_MASK (0x1f << MExxCTL_NXT_SHIFT) #define MExxCTL_NXT_SHIFT 11 #define MExxCTL_WD1 (1 << 10) #define MExxCTL_WD0 (1 << 9) #define MExxCTL_WS (1 << 8) #define MExxCTL_CB (1 << 7) #define MExxCTL_WBF (1 << 6) #define MExxCTL_WF (1 << 5) #define MExxCTL_RF (1 << 4) #define MExxCTL_CM (1 << 3) #define MExxCTL_MD_READ (1 << 0) #define MExxCTL_MD_WRITE (2 << 0) #define MExxCTL_MD_ICB_WB (3 << 0) #define MExxCTL_MD_ICB (4 << 0) #define MExxCTL_MD_FB (7 << 0) #define MExxCTL_MD_MASK (7 << 0) #define MExxBSIZE 0x404 #define MExxBSIZE_RCNT_SHIFT 28 #define MExxBSIZE_YSZM1_SHIFT 16 #define MExxBSIZE_XSZM1_SHIFT 0 #define MExxMNCF 0x408 #define MExxMNCF_KWBNM_SHIFT 28 #define MExxMNCF_KRBNM_SHIFT 24 #define MExxMNCF_BNM_SHIFT 16 #define MExxMNCF_XBV (1 << 15) #define MExxMNCF_CPL_YCBCR444 (1 << 12) #define MExxMNCF_CPL_YCBCR420 (2 << 12) #define MExxMNCF_CPL_YCBCR422 (3 << 12) #define 
MExxMNCF_CPL_MSK (3 << 12) #define MExxMNCF_BL (1 << 2) #define MExxMNCF_LNM_SHIFT 0 #define MExxSARA 0x410 #define MExxSARB 0x414 #define MExxSBSIZE 0x418 #define MExxSBSIZE_HDV (1 << 31) #define MExxSBSIZE_HSZ16 (0 << 28) #define MExxSBSIZE_HSZ32 (1 << 28) #define MExxSBSIZE_HSZ64 (2 << 28) #define MExxSBSIZE_HSZ128 (3 << 28) #define MExxSBSIZE_SBSIZZ_SHIFT 0 #define MERAM_MExxCTL_VAL(next, addr) \ ((((next) << MExxCTL_NXT_SHIFT) & MExxCTL_NXT_MASK) | \ (((addr) << MExxCTL_MSAR_SHIFT) & MExxCTL_MSAR_MASK)) #define MERAM_MExxBSIZE_VAL(rcnt, yszm1, xszm1) \ (((rcnt) << MExxBSIZE_RCNT_SHIFT) | \ ((yszm1) << MExxBSIZE_YSZM1_SHIFT) | \ ((xszm1) << MExxBSIZE_XSZM1_SHIFT)) static const unsigned long common_regs[] = { MEVCR1, MEQSEL1, MEQSEL2, }; #define MERAM_REGS_SIZE ARRAY_SIZE(common_regs) static const unsigned long icb_regs[] = { MExxCTL, MExxBSIZE, MExxMNCF, MExxSARA, MExxSARB, MExxSBSIZE, }; #define ICB_REGS_SIZE ARRAY_SIZE(icb_regs) /* * sh_mobile_meram_icb - MERAM ICB information * @regs: Registers cache * @index: ICB index * @offset: MERAM block offset * @size: MERAM block size in KiB * @cache_unit: Bytes to cache per ICB * @pixelformat: Video pixel format of the data stored in the ICB * @current_reg: Which of Start Address Register A (0) or B (1) is in use */ struct sh_mobile_meram_icb { unsigned long regs[ICB_REGS_SIZE]; unsigned int index; unsigned long offset; unsigned int size; unsigned int cache_unit; unsigned int pixelformat; unsigned int current_reg; }; #define MERAM_ICB_NUM 32 struct sh_mobile_meram_fb_plane { struct sh_mobile_meram_icb *marker; struct sh_mobile_meram_icb *cache; }; struct sh_mobile_meram_fb_cache { unsigned int nplanes; struct sh_mobile_meram_fb_plane planes[2]; }; /* * sh_mobile_meram_priv - MERAM device * @base: Registers base address * @meram: MERAM physical address * @regs: Registers cache * @lock: Protects used_icb and icbs * @used_icb: Bitmask of used ICBs * @icbs: ICBs * @pool: Allocation pool to manage the MERAM */ struct 
sh_mobile_meram_priv { void __iomem *base; unsigned long meram; unsigned long regs[MERAM_REGS_SIZE]; struct mutex lock; unsigned long used_icb; struct sh_mobile_meram_icb icbs[MERAM_ICB_NUM]; struct gen_pool *pool; }; /* settings */ #define MERAM_GRANULARITY 1024 #define MERAM_SEC_LINE 15 #define MERAM_LINE_WIDTH 2048 /* ----------------------------------------------------------------------------- * Registers access */ #define MERAM_ICB_OFFSET(base, idx, off) ((base) + (off) + (idx) * 0x20) static inline void meram_write_icb(void __iomem *base, unsigned int idx, unsigned int off, unsigned long val) { iowrite32(val, MERAM_ICB_OFFSET(base, idx, off)); } static inline unsigned long meram_read_icb(void __iomem *base, unsigned int idx, unsigned int off) { return ioread32(MERAM_ICB_OFFSET(base, idx, off)); } static inline void meram_write_reg(void __iomem *base, unsigned int off, unsigned long val) { iowrite32(val, base + off); } static inline unsigned long meram_read_reg(void __iomem *base, unsigned int off) { return ioread32(base + off); } /* ----------------------------------------------------------------------------- * MERAM allocation and free */ static unsigned long meram_alloc(struct sh_mobile_meram_priv *priv, size_t size) { return gen_pool_alloc(priv->pool, size); } static void meram_free(struct sh_mobile_meram_priv *priv, unsigned long mem, size_t size) { gen_pool_free(priv->pool, mem, size); } /* ----------------------------------------------------------------------------- * LCDC cache planes allocation, init, cleanup and free */ /* Allocate ICBs and MERAM for a plane. 
*/ static int meram_plane_alloc(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_plane *plane, size_t size) { unsigned long mem; unsigned long idx; idx = find_first_zero_bit(&priv->used_icb, 28); if (idx == 28) return -ENOMEM; plane->cache = &priv->icbs[idx]; idx = find_next_zero_bit(&priv->used_icb, 32, 28); if (idx == 32) return -ENOMEM; plane->marker = &priv->icbs[idx]; mem = meram_alloc(priv, size * 1024); if (mem == 0) return -ENOMEM; __set_bit(plane->marker->index, &priv->used_icb); __set_bit(plane->cache->index, &priv->used_icb); plane->marker->offset = mem - priv->meram; plane->marker->size = size; return 0; } /* Free ICBs and MERAM for a plane. */ static void meram_plane_free(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_plane *plane) { meram_free(priv, priv->meram + plane->marker->offset, plane->marker->size * 1024); __clear_bit(plane->marker->index, &priv->used_icb); __clear_bit(plane->cache->index, &priv->used_icb); } /* Is this a YCbCr(NV12, NV16 or NV24) colorspace? */ static int is_nvcolor(int cspace) { if (cspace == SH_MOBILE_MERAM_PF_NV || cspace == SH_MOBILE_MERAM_PF_NV24) return 1; return 0; } /* Set the next address to fetch. */ static void meram_set_next_addr(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_cache *cache, unsigned long base_addr_y, unsigned long base_addr_c) { struct sh_mobile_meram_icb *icb = cache->planes[0].marker; unsigned long target; icb->current_reg ^= 1; target = icb->current_reg ? 
MExxSARB : MExxSARA; /* set the next address to fetch */ meram_write_icb(priv->base, cache->planes[0].cache->index, target, base_addr_y); meram_write_icb(priv->base, cache->planes[0].marker->index, target, base_addr_y + cache->planes[0].marker->cache_unit); if (cache->nplanes == 2) { meram_write_icb(priv->base, cache->planes[1].cache->index, target, base_addr_c); meram_write_icb(priv->base, cache->planes[1].marker->index, target, base_addr_c + cache->planes[1].marker->cache_unit); } } /* Get the next ICB address. */ static void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata, struct sh_mobile_meram_fb_cache *cache, unsigned long *icb_addr_y, unsigned long *icb_addr_c) { struct sh_mobile_meram_icb *icb = cache->planes[0].marker; unsigned long icb_offset; if (pdata->addr_mode == SH_MOBILE_MERAM_MODE0) icb_offset = 0x80000000 | (icb->current_reg << 29); else icb_offset = 0xc0000000 | (icb->current_reg << 23); *icb_addr_y = icb_offset | (cache->planes[0].marker->index << 24); if (cache->nplanes == 2) *icb_addr_c = icb_offset | (cache->planes[1].marker->index << 24); } #define MERAM_CALC_BYTECOUNT(x, y) \ (((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1)) /* Initialize MERAM. 
*/ static int meram_plane_init(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_plane *plane, unsigned int xres, unsigned int yres, unsigned int *out_pitch) { struct sh_mobile_meram_icb *marker = plane->marker; unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres); unsigned long bnm; unsigned int lcdc_pitch; unsigned int xpitch; unsigned int line_cnt; unsigned int save_lines; /* adjust pitch to 1024, 2048, 4096 or 8192 */ lcdc_pitch = (xres - 1) | 1023; lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 1); lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 2); lcdc_pitch += 1; /* derive settings */ if (lcdc_pitch == 8192 && yres >= 1024) { lcdc_pitch = xpitch = MERAM_LINE_WIDTH; line_cnt = total_byte_count >> 11; *out_pitch = xres; save_lines = plane->marker->size / 16 / MERAM_SEC_LINE; save_lines *= MERAM_SEC_LINE; } else { xpitch = xres; line_cnt = yres; *out_pitch = lcdc_pitch; save_lines = plane->marker->size / (lcdc_pitch >> 10) / 2; save_lines &= 0xff; } bnm = (save_lines - 1) << 16; /* TODO: we better to check if we have enough MERAM buffer size */ /* set up ICB */ meram_write_icb(priv->base, plane->cache->index, MExxBSIZE, MERAM_MExxBSIZE_VAL(0x0, line_cnt - 1, xpitch - 1)); meram_write_icb(priv->base, plane->marker->index, MExxBSIZE, MERAM_MExxBSIZE_VAL(0xf, line_cnt - 1, xpitch - 1)); meram_write_icb(priv->base, plane->cache->index, MExxMNCF, bnm); meram_write_icb(priv->base, plane->marker->index, MExxMNCF, bnm); meram_write_icb(priv->base, plane->cache->index, MExxSBSIZE, xpitch); meram_write_icb(priv->base, plane->marker->index, MExxSBSIZE, xpitch); /* save a cache unit size */ plane->cache->cache_unit = xres * save_lines; plane->marker->cache_unit = xres * save_lines; /* * Set MERAM for framebuffer * * we also chain the cache_icb and the marker_icb. * we also split the allocated MERAM buffer between two ICBs. 
*/ meram_write_icb(priv->base, plane->cache->index, MExxCTL, MERAM_MExxCTL_VAL(plane->marker->index, marker->offset) | MExxCTL_WD1 | MExxCTL_WD0 | MExxCTL_WS | MExxCTL_CM | MExxCTL_MD_FB); meram_write_icb(priv->base, plane->marker->index, MExxCTL, MERAM_MExxCTL_VAL(plane->cache->index, marker->offset + plane->marker->size / 2) | MExxCTL_WD1 | MExxCTL_WD0 | MExxCTL_WS | MExxCTL_CM | MExxCTL_MD_FB); return 0; } static void meram_plane_cleanup(struct sh_mobile_meram_priv *priv, struct sh_mobile_meram_fb_plane *plane) { /* disable ICB */ meram_write_icb(priv->base, plane->cache->index, MExxCTL, MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF); meram_write_icb(priv->base, plane->marker->index, MExxCTL, MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF); plane->cache->cache_unit = 0; plane->marker->cache_unit = 0; } /* ----------------------------------------------------------------------------- * MERAM operations */ unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *pdata, size_t size) { struct sh_mobile_meram_priv *priv = pdata->priv; return meram_alloc(priv, size); } EXPORT_SYMBOL_GPL(sh_mobile_meram_alloc); void sh_mobile_meram_free(struct sh_mobile_meram_info *pdata, unsigned long mem, size_t size) { struct sh_mobile_meram_priv *priv = pdata->priv; meram_free(priv, mem, size); } EXPORT_SYMBOL_GPL(sh_mobile_meram_free); /* Allocate memory for the ICBs and mark them as used. */ static struct sh_mobile_meram_fb_cache * meram_cache_alloc(struct sh_mobile_meram_priv *priv, const struct sh_mobile_meram_cfg *cfg, int pixelformat) { unsigned int nplanes = is_nvcolor(pixelformat) ? 
2 : 1; struct sh_mobile_meram_fb_cache *cache; int ret; cache = kzalloc(sizeof(*cache), GFP_KERNEL); if (cache == NULL) return ERR_PTR(-ENOMEM); cache->nplanes = nplanes; ret = meram_plane_alloc(priv, &cache->planes[0], cfg->icb[0].meram_size); if (ret < 0) goto error; cache->planes[0].marker->current_reg = 1; cache->planes[0].marker->pixelformat = pixelformat; if (cache->nplanes == 1) return cache; ret = meram_plane_alloc(priv, &cache->planes[1], cfg->icb[1].meram_size); if (ret < 0) { meram_plane_free(priv, &cache->planes[0]); goto error; } return cache; error: kfree(cache); return ERR_PTR(-ENOMEM); } void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *pdata, const struct sh_mobile_meram_cfg *cfg, unsigned int xres, unsigned int yres, unsigned int pixelformat, unsigned int *pitch) { struct sh_mobile_meram_fb_cache *cache; struct sh_mobile_meram_priv *priv = pdata->priv; struct platform_device *pdev = pdata->pdev; unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1; unsigned int out_pitch; if (priv == NULL) return ERR_PTR(-ENODEV); if (pixelformat != SH_MOBILE_MERAM_PF_NV && pixelformat != SH_MOBILE_MERAM_PF_NV24 && pixelformat != SH_MOBILE_MERAM_PF_RGB) return ERR_PTR(-EINVAL); dev_dbg(&pdev->dev, "registering %dx%d (%s)", xres, yres, !pixelformat ? "yuv" : "rgb"); /* we can't handle wider than 8192px */ if (xres > 8192) { dev_err(&pdev->dev, "width exceeding the limit (> 8192)."); return ERR_PTR(-EINVAL); } if (cfg->icb[0].meram_size == 0) return ERR_PTR(-EINVAL); if (nplanes == 2 && cfg->icb[1].meram_size == 0) return ERR_PTR(-EINVAL); mutex_lock(&priv->lock); /* We now register the ICBs and allocate the MERAM regions. 
*/ cache = meram_cache_alloc(priv, cfg, pixelformat); if (IS_ERR(cache)) { dev_err(&pdev->dev, "MERAM allocation failed (%ld).", PTR_ERR(cache)); goto err; } /* initialize MERAM */ meram_plane_init(priv, &cache->planes[0], xres, yres, &out_pitch); *pitch = out_pitch; if (pixelformat == SH_MOBILE_MERAM_PF_NV) meram_plane_init(priv, &cache->planes[1], xres, (yres + 1) / 2, &out_pitch); else if (pixelformat == SH_MOBILE_MERAM_PF_NV24) meram_plane_init(priv, &cache->planes[1], 2 * xres, (yres + 1) / 2, &out_pitch); err: mutex_unlock(&priv->lock); return cache; } EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_alloc); void sh_mobile_meram_cache_free(struct sh_mobile_meram_info *pdata, void *data) { struct sh_mobile_meram_fb_cache *cache = data; struct sh_mobile_meram_priv *priv = pdata->priv; mutex_lock(&priv->lock); /* Cleanup and free. */ meram_plane_cleanup(priv, &cache->planes[0]); meram_plane_free(priv, &cache->planes[0]); if (cache->nplanes == 2) { meram_plane_cleanup(priv, &cache->planes[1]); meram_plane_free(priv, &cache->planes[1]); } kfree(cache); mutex_unlock(&priv->lock); } EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_free); void sh_mobile_meram_cache_update(struct sh_mobile_meram_info *pdata, void *data, unsigned long base_addr_y, unsigned long base_addr_c, unsigned long *icb_addr_y, unsigned long *icb_addr_c) { struct sh_mobile_meram_fb_cache *cache = data; struct sh_mobile_meram_priv *priv = pdata->priv; mutex_lock(&priv->lock); meram_set_next_addr(priv, cache, base_addr_y, base_addr_c); meram_get_next_icb_addr(pdata, cache, icb_addr_y, icb_addr_c); mutex_unlock(&priv->lock); } EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update); /* ----------------------------------------------------------------------------- * Power management */ static int sh_mobile_meram_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev); unsigned int i, j; for (i = 0; i < MERAM_REGS_SIZE; i++) 
priv->regs[i] = meram_read_reg(priv->base, common_regs[i]); for (i = 0; i < 32; i++) { if (!test_bit(i, &priv->used_icb)) continue; for (j = 0; j < ICB_REGS_SIZE; j++) { priv->icbs[i].regs[j] = meram_read_icb(priv->base, i, icb_regs[j]); /* Reset ICB on resume */ if (icb_regs[j] == MExxCTL) priv->icbs[i].regs[j] |= MExxCTL_WBF | MExxCTL_WF | MExxCTL_RF; } } return 0; } static int sh_mobile_meram_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev); unsigned int i, j; for (i = 0; i < 32; i++) { if (!test_bit(i, &priv->used_icb)) continue; for (j = 0; j < ICB_REGS_SIZE; j++) meram_write_icb(priv->base, i, icb_regs[j], priv->icbs[i].regs[j]); } for (i = 0; i < MERAM_REGS_SIZE; i++) meram_write_reg(priv->base, common_regs[i], priv->regs[i]); return 0; } static UNIVERSAL_DEV_PM_OPS(sh_mobile_meram_dev_pm_ops, sh_mobile_meram_suspend, sh_mobile_meram_resume, NULL); /* ----------------------------------------------------------------------------- * Probe/remove and driver init/exit */ static int sh_mobile_meram_probe(struct platform_device *pdev) { struct sh_mobile_meram_priv *priv; struct sh_mobile_meram_info *pdata = pdev->dev.platform_data; struct resource *regs; struct resource *meram; unsigned int i; int error; if (!pdata) { dev_err(&pdev->dev, "no platform data defined\n"); return -EINVAL; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); meram = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (regs == NULL || meram == NULL) { dev_err(&pdev->dev, "cannot get platform resources\n"); return -ENOENT; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(&pdev->dev, "cannot allocate device data\n"); return -ENOMEM; } /* Initialize private data. 
*/ mutex_init(&priv->lock); priv->used_icb = pdata->reserved_icbs; for (i = 0; i < MERAM_ICB_NUM; ++i) priv->icbs[i].index = i; pdata->priv = priv; pdata->pdev = pdev; /* Request memory regions and remap the registers. */ if (!request_mem_region(regs->start, resource_size(regs), pdev->name)) { dev_err(&pdev->dev, "MERAM registers region already claimed\n"); error = -EBUSY; goto err_req_regs; } if (!request_mem_region(meram->start, resource_size(meram), pdev->name)) { dev_err(&pdev->dev, "MERAM memory region already claimed\n"); error = -EBUSY; goto err_req_meram; } priv->base = ioremap_nocache(regs->start, resource_size(regs)); if (!priv->base) { dev_err(&pdev->dev, "ioremap failed\n"); error = -EFAULT; goto err_ioremap; } priv->meram = meram->start; /* Create and initialize the MERAM memory pool. */ priv->pool = gen_pool_create(ilog2(MERAM_GRANULARITY), -1); if (priv->pool == NULL) { error = -ENOMEM; goto err_genpool; } error = gen_pool_add(priv->pool, meram->start, resource_size(meram), -1); if (error < 0) goto err_genpool; /* initialize ICB addressing mode */ if (pdata->addr_mode == SH_MOBILE_MERAM_MODE1) meram_write_reg(priv->base, MEVCR1, MEVCR1_AMD1); platform_set_drvdata(pdev, priv); pm_runtime_enable(&pdev->dev); dev_info(&pdev->dev, "sh_mobile_meram initialized."); return 0; err_genpool: if (priv->pool) gen_pool_destroy(priv->pool); iounmap(priv->base); err_ioremap: release_mem_region(meram->start, resource_size(meram)); err_req_meram: release_mem_region(regs->start, resource_size(regs)); err_req_regs: mutex_destroy(&priv->lock); kfree(priv); return error; } static int sh_mobile_meram_remove(struct platform_device *pdev) { struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev); struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *meram = platform_get_resource(pdev, IORESOURCE_MEM, 1); pm_runtime_disable(&pdev->dev); gen_pool_destroy(priv->pool); iounmap(priv->base); release_mem_region(meram->start, 
resource_size(meram)); release_mem_region(regs->start, resource_size(regs)); mutex_destroy(&priv->lock); kfree(priv); return 0; } static struct platform_driver sh_mobile_meram_driver = { .driver = { .name = "sh_mobile_meram", .owner = THIS_MODULE, .pm = &sh_mobile_meram_dev_pm_ops, }, .probe = sh_mobile_meram_probe, .remove = sh_mobile_meram_remove, }; module_platform_driver(sh_mobile_meram_driver); MODULE_DESCRIPTION("SuperH Mobile MERAM driver"); MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama"); MODULE_LICENSE("GPL v2");
gpl-2.0
iAMr00t/android_kernel_huawei_msm8916
arch/powerpc/kernel/rtasd.c
2407
14805
/* * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Communication to userspace based on kernel/printk.c */ #include <linux/types.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/spinlock.h> #include <linux/cpu.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/rtas.h> #include <asm/prom.h> #include <asm/nvram.h> #include <linux/atomic.h> #include <asm/machdep.h> #include <asm/topology.h> static DEFINE_SPINLOCK(rtasd_log_lock); static DECLARE_WAIT_QUEUE_HEAD(rtas_log_wait); static char *rtas_log_buf; static unsigned long rtas_log_start; static unsigned long rtas_log_size; static int surveillance_timeout = -1; static unsigned int rtas_error_log_max; static unsigned int rtas_error_log_buffer_max; /* RTAS service tokens */ static unsigned int event_scan; static unsigned int rtas_event_scan_rate; static int full_rtas_msgs = 0; /* Stop logging to nvram after first fatal error */ static int logging_enabled; /* Until we initialize everything, * make sure we don't try logging * anything */ static int error_log_cnt; /* * Since we use 32 bit RTAS, the physical address of this must be below * 4G or else bad things happen. Allocate this in the kernel data and * make it big enough. 
*/ static unsigned char logdata[RTAS_ERROR_LOG_MAX]; static char *rtas_type[] = { "Unknown", "Retry", "TCE Error", "Internal Device Failure", "Timeout", "Data Parity", "Address Parity", "Cache Parity", "Address Invalid", "ECC Uncorrected", "ECC Corrupted", }; static char *rtas_event_type(int type) { if ((type > 0) && (type < 11)) return rtas_type[type]; switch (type) { case RTAS_TYPE_EPOW: return "EPOW"; case RTAS_TYPE_PLATFORM: return "Platform Error"; case RTAS_TYPE_IO: return "I/O Event"; case RTAS_TYPE_INFO: return "Platform Information Event"; case RTAS_TYPE_DEALLOC: return "Resource Deallocation Event"; case RTAS_TYPE_DUMP: return "Dump Notification Event"; case RTAS_TYPE_PRRN: return "Platform Resource Reassignment Event"; } return rtas_type[0]; } /* To see this info, grep RTAS /var/log/messages and each entry * will be collected together with obvious begin/end. * There will be a unique identifier on the begin and end lines. * This will persist across reboots. * * format of error logs returned from RTAS: * bytes (size) : contents * -------------------------------------------------------- * 0-7 (8) : rtas_error_log * 8-47 (40) : extended info * 48-51 (4) : vendor id * 52-1023 (vendor specific) : location code and debug data */ static void printk_log_rtas(char *buf, int len) { int i,j,n = 0; int perline = 16; char buffer[64]; char * str = "RTAS event"; if (full_rtas_msgs) { printk(RTAS_DEBUG "%d -------- %s begin --------\n", error_log_cnt, str); /* * Print perline bytes on each line, each line will start * with RTAS and a changing number, so syslogd will * print lines that are otherwise the same. Separate every * 4 bytes with a space. 
*/ for (i = 0; i < len; i++) { j = i % perline; if (j == 0) { memset(buffer, 0, sizeof(buffer)); n = sprintf(buffer, "RTAS %d:", i/perline); } if ((i % 4) == 0) n += sprintf(buffer+n, " "); n += sprintf(buffer+n, "%02x", (unsigned char)buf[i]); if (j == (perline-1)) printk(KERN_DEBUG "%s\n", buffer); } if ((i % perline) != 0) printk(KERN_DEBUG "%s\n", buffer); printk(RTAS_DEBUG "%d -------- %s end ----------\n", error_log_cnt, str); } else { struct rtas_error_log *errlog = (struct rtas_error_log *)buf; printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n", error_log_cnt, rtas_event_type(errlog->type), errlog->severity); } } static int log_rtas_len(char * buf) { int len; struct rtas_error_log *err; /* rtas fixed header */ len = 8; err = (struct rtas_error_log *)buf; if (err->extended && err->extended_log_length) { /* extended header */ len += err->extended_log_length; } if (rtas_error_log_max == 0) rtas_error_log_max = rtas_get_error_log_max(); if (len > rtas_error_log_max) len = rtas_error_log_max; return len; } /* * First write to nvram, if fatal error, that is the only * place we log the info. The error will be picked up * on the next reboot by rtasd. If not fatal, run the * method for the type of error. Currently, only RTAS * errors have methods implemented, but in the future * there might be a need to store data in nvram before a * call to panic(). * * XXX We write to nvram periodically, to indicate error has * been written and sync'd, but there is a possibility * that if we don't shutdown correctly, a duplicate error * record will be created on next reboot. 
*/ void pSeries_log_error(char *buf, unsigned int err_type, int fatal) { unsigned long offset; unsigned long s; int len = 0; pr_debug("rtasd: logging event\n"); if (buf == NULL) return; spin_lock_irqsave(&rtasd_log_lock, s); /* get length and increase count */ switch (err_type & ERR_TYPE_MASK) { case ERR_TYPE_RTAS_LOG: len = log_rtas_len(buf); if (!(err_type & ERR_FLAG_BOOT)) error_log_cnt++; break; case ERR_TYPE_KERNEL_PANIC: default: WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ spin_unlock_irqrestore(&rtasd_log_lock, s); return; } #ifdef CONFIG_PPC64 /* Write error to NVRAM */ if (logging_enabled && !(err_type & ERR_FLAG_BOOT)) nvram_write_error_log(buf, len, err_type, error_log_cnt); #endif /* CONFIG_PPC64 */ /* * rtas errors can occur during boot, and we do want to capture * those somewhere, even if nvram isn't ready (why not?), and even * if rtasd isn't ready. Put them into the boot log, at least. */ if ((err_type & ERR_TYPE_MASK) == ERR_TYPE_RTAS_LOG) printk_log_rtas(buf, len); /* Check to see if we need to or have stopped logging */ if (fatal || !logging_enabled) { logging_enabled = 0; WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ spin_unlock_irqrestore(&rtasd_log_lock, s); return; } /* call type specific method for error */ switch (err_type & ERR_TYPE_MASK) { case ERR_TYPE_RTAS_LOG: offset = rtas_error_log_buffer_max * ((rtas_log_start+rtas_log_size) & LOG_NUMBER_MASK); /* First copy over sequence number */ memcpy(&rtas_log_buf[offset], (void *) &error_log_cnt, sizeof(int)); /* Second copy over error log data */ offset += sizeof(int); memcpy(&rtas_log_buf[offset], buf, len); if (rtas_log_size < LOG_NUMBER) rtas_log_size += 1; else rtas_log_start += 1; WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ spin_unlock_irqrestore(&rtasd_log_lock, s); wake_up_interruptible(&rtas_log_wait); break; case ERR_TYPE_KERNEL_PANIC: default: WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */ spin_unlock_irqrestore(&rtasd_log_lock, s); return; } } #ifdef 
CONFIG_PPC_PSERIES static s32 prrn_update_scope; static void prrn_work_fn(struct work_struct *work) { /* * For PRRN, we must pass the negative of the scope value in * the RTAS event. */ pseries_devicetree_update(-prrn_update_scope); } static DECLARE_WORK(prrn_work, prrn_work_fn); void prrn_schedule_update(u32 scope) { flush_work(&prrn_work); prrn_update_scope = scope; schedule_work(&prrn_work); } static void handle_rtas_event(const struct rtas_error_log *log) { if (log->type == RTAS_TYPE_PRRN) { /* For PRRN Events the extended log length is used to denote * the scope for calling rtas update-nodes. */ if (prrn_is_enabled()) prrn_schedule_update(log->extended_log_length); } return; } #else static void handle_rtas_event(const struct rtas_error_log *log) { return; } #endif static int rtas_log_open(struct inode * inode, struct file * file) { return 0; } static int rtas_log_release(struct inode * inode, struct file * file) { return 0; } /* This will check if all events are logged, if they are then, we * know that we can safely clear the events in NVRAM. * Next we'll sit and wait for something else to log. 
*/ static ssize_t rtas_log_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { int error; char *tmp; unsigned long s; unsigned long offset; if (!buf || count < rtas_error_log_buffer_max) return -EINVAL; count = rtas_error_log_buffer_max; if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; tmp = kmalloc(count, GFP_KERNEL); if (!tmp) return -ENOMEM; spin_lock_irqsave(&rtasd_log_lock, s); /* if it's 0, then we know we got the last one (the one in NVRAM) */ while (rtas_log_size == 0) { if (file->f_flags & O_NONBLOCK) { spin_unlock_irqrestore(&rtasd_log_lock, s); error = -EAGAIN; goto out; } if (!logging_enabled) { spin_unlock_irqrestore(&rtasd_log_lock, s); error = -ENODATA; goto out; } #ifdef CONFIG_PPC64 nvram_clear_error_log(); #endif /* CONFIG_PPC64 */ spin_unlock_irqrestore(&rtasd_log_lock, s); error = wait_event_interruptible(rtas_log_wait, rtas_log_size); if (error) goto out; spin_lock_irqsave(&rtasd_log_lock, s); } offset = rtas_error_log_buffer_max * (rtas_log_start & LOG_NUMBER_MASK); memcpy(tmp, &rtas_log_buf[offset], count); rtas_log_start += 1; rtas_log_size -= 1; spin_unlock_irqrestore(&rtasd_log_lock, s); error = copy_to_user(buf, tmp, count) ? 
-EFAULT : count; out: kfree(tmp); return error; } static unsigned int rtas_log_poll(struct file *file, poll_table * wait) { poll_wait(file, &rtas_log_wait, wait); if (rtas_log_size) return POLLIN | POLLRDNORM; return 0; } static const struct file_operations proc_rtas_log_operations = { .read = rtas_log_read, .poll = rtas_log_poll, .open = rtas_log_open, .release = rtas_log_release, .llseek = noop_llseek, }; static int enable_surveillance(int timeout) { int error; error = rtas_set_indicator(SURVEILLANCE_TOKEN, 0, timeout); if (error == 0) return 0; if (error == -EINVAL) { printk(KERN_DEBUG "rtasd: surveillance not supported\n"); return 0; } printk(KERN_ERR "rtasd: could not update surveillance\n"); return -1; } static void do_event_scan(void) { int error; do { memset(logdata, 0, rtas_error_log_max); error = rtas_call(event_scan, 4, 1, NULL, RTAS_EVENT_SCAN_ALL_EVENTS, 0, __pa(logdata), rtas_error_log_max); if (error == -1) { printk(KERN_ERR "event-scan failed\n"); break; } if (error == 0) { pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, 0); handle_rtas_event((struct rtas_error_log *)logdata); } } while(error == 0); } static void rtas_event_scan(struct work_struct *w); DECLARE_DELAYED_WORK(event_scan_work, rtas_event_scan); /* * Delay should be at least one second since some machines have problems if * we call event-scan too quickly. */ static unsigned long event_scan_delay = 1*HZ; static int first_pass = 1; static void rtas_event_scan(struct work_struct *w) { unsigned int cpu; do_event_scan(); get_online_cpus(); /* raw_ OK because just using CPU as starting point. 
*/ cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); if (cpu >= nr_cpu_ids) { cpu = cpumask_first(cpu_online_mask); if (first_pass) { first_pass = 0; event_scan_delay = 30*HZ/rtas_event_scan_rate; if (surveillance_timeout != -1) { pr_debug("rtasd: enabling surveillance\n"); enable_surveillance(surveillance_timeout); pr_debug("rtasd: surveillance enabled\n"); } } } schedule_delayed_work_on(cpu, &event_scan_work, __round_jiffies_relative(event_scan_delay, cpu)); put_online_cpus(); } #ifdef CONFIG_PPC64 static void retreive_nvram_error_log(void) { unsigned int err_type ; int rc ; /* See if we have any error stored in NVRAM */ memset(logdata, 0, rtas_error_log_max); rc = nvram_read_error_log(logdata, rtas_error_log_max, &err_type, &error_log_cnt); /* We can use rtas_log_buf now */ logging_enabled = 1; if (!rc) { if (err_type != ERR_FLAG_ALREADY_LOGGED) { pSeries_log_error(logdata, err_type | ERR_FLAG_BOOT, 0); } } } #else /* CONFIG_PPC64 */ static void retreive_nvram_error_log(void) { } #endif /* CONFIG_PPC64 */ static void start_event_scan(void) { printk(KERN_DEBUG "RTAS daemon started\n"); pr_debug("rtasd: will sleep for %d milliseconds\n", (30000 / rtas_event_scan_rate)); /* Retrieve errors from nvram if any */ retreive_nvram_error_log(); schedule_delayed_work_on(cpumask_first(cpu_online_mask), &event_scan_work, event_scan_delay); } /* Cancel the rtas event scan work */ void rtas_cancel_event_scan(void) { cancel_delayed_work_sync(&event_scan_work); } EXPORT_SYMBOL_GPL(rtas_cancel_event_scan); static int __init rtas_init(void) { struct proc_dir_entry *entry; if (!machine_is(pseries) && !machine_is(chrp)) return 0; /* No RTAS */ event_scan = rtas_token("event-scan"); if (event_scan == RTAS_UNKNOWN_SERVICE) { printk(KERN_INFO "rtasd: No event-scan on system\n"); return -ENODEV; } rtas_event_scan_rate = rtas_token("rtas-event-scan-rate"); if (rtas_event_scan_rate == RTAS_UNKNOWN_SERVICE) { printk(KERN_ERR "rtasd: no rtas-event-scan-rate on system\n"); return 
-ENODEV; } if (!rtas_event_scan_rate) { /* Broken firmware: take a rate of zero to mean don't scan */ printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n"); return 0; } /* Make room for the sequence number */ rtas_error_log_max = rtas_get_error_log_max(); rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int); rtas_log_buf = vmalloc(rtas_error_log_buffer_max*LOG_NUMBER); if (!rtas_log_buf) { printk(KERN_ERR "rtasd: no memory\n"); return -ENOMEM; } entry = proc_create("powerpc/rtas/error_log", S_IRUSR, NULL, &proc_rtas_log_operations); if (!entry) printk(KERN_ERR "Failed to create error_log proc entry\n"); start_event_scan(); return 0; } __initcall(rtas_init); static int __init surveillance_setup(char *str) { int i; /* We only do surveillance on pseries */ if (!machine_is(pseries)) return 0; if (get_option(&str,&i)) { if (i >= 0 && i <= 255) surveillance_timeout = i; } return 1; } __setup("surveillance=", surveillance_setup); static int __init rtasmsgs_setup(char *str) { if (strcmp(str, "on") == 0) full_rtas_msgs = 1; else if (strcmp(str, "off") == 0) full_rtas_msgs = 0; return 1; } __setup("rtasmsgs=", rtasmsgs_setup);
gpl-2.0
FrozenCow/FIRE-ICE
drivers/net/wireless/ti/wl18xx/cmd.c
2663
2153
/* * This file is part of wl18xx * * Copyright (C) 2011 Texas Instruments Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include "../wlcore/cmd.h" #include "../wlcore/debug.h" #include "../wlcore/hw_ops.h" #include "cmd.h" int wl18xx_cmd_channel_switch(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct ieee80211_channel_switch *ch_switch) { struct wl18xx_cmd_channel_switch *cmd; u32 supported_rates; int ret; wl1271_debug(DEBUG_ACX, "cmd channel switch"); cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { ret = -ENOMEM; goto out; } cmd->role_id = wlvif->role_id; cmd->channel = ch_switch->chandef.chan->hw_value; cmd->switch_time = ch_switch->count; cmd->stop_tx = ch_switch->block_tx; switch (ch_switch->chandef.chan->band) { case IEEE80211_BAND_2GHZ: cmd->band = WLCORE_BAND_2_4GHZ; break; case IEEE80211_BAND_5GHZ: cmd->band = WLCORE_BAND_5GHZ; break; default: wl1271_error("invalid channel switch band: %d", ch_switch->chandef.chan->band); ret = -EINVAL; goto out_free; } supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES | wlcore_hw_sta_get_ap_rate_mask(wl, wlvif); if (wlvif->p2p) supported_rates &= ~CONF_TX_CCK_RATES; cmd->local_supported_rates = cpu_to_le32(supported_rates); cmd->channel_type = wlvif->channel_type; ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0); if (ret < 0) { wl1271_error("failed to send channel switch command"); goto out_free; 
} out_free: kfree(cmd); out: return ret; }
gpl-2.0
EloYGomeZ/caf-j1-exp
drivers/hwmon/coretemp.c
2663
21553
/* * coretemp.c - Linux kernel module for hardware monitoring * * Copyright (C) 2007 Rudolf Marek <r.marek@assembler.cz> * * Inspired from many hwmon drivers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301 USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/hwmon.h> #include <linux/sysfs.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/moduleparam.h> #include <asm/msr.h> #include <asm/processor.h> #include <asm/cpu_device_id.h> #define DRVNAME "coretemp" /* * force_tjmax only matters when TjMax can't be read from the CPU itself. * When set, it replaces the driver's suboptimal heuristic. 
*/ static int force_tjmax; module_param_named(tjmax, force_tjmax, int, 0444); MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) #define TO_PHYS_ID(cpu) (cpu_data(cpu).phys_proc_id) #define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id) #define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) #ifdef CONFIG_SMP #define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu)) #else #define for_each_sibling(i, cpu) for (i = 0; false; ) #endif /* * Per-Core Temperature Data * @last_updated: The time when the current temperature value was updated * earlier (in jiffies). * @cpu_core_id: The CPU Core from which temperature values should be read * This value is passed as "id" field to rdmsr/wrmsr functions. * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, * from where the temperature values should be read. * @attr_size: Total number of pre-core attrs displayed in the sysfs. * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. * Otherwise, temp_data holds coretemp data. * @valid: If this is 1, the current temperature is valid. 
*/ struct temp_data { int temp; int ttarget; int tjmax; unsigned long last_updated; unsigned int cpu; u32 cpu_core_id; u32 status_reg; int attr_size; bool is_pkg_data; bool valid; struct sensor_device_attribute sd_attrs[TOTAL_ATTRS]; char attr_name[TOTAL_ATTRS][CORETEMP_NAME_LENGTH]; struct mutex update_lock; }; /* Platform Data per Physical CPU */ struct platform_data { struct device *hwmon_dev; u16 phys_proc_id; struct temp_data *core_data[MAX_CORE_DATA]; struct device_attribute name_attr; }; struct pdev_entry { struct list_head list; struct platform_device *pdev; u16 phys_proc_id; }; static LIST_HEAD(pdev_list); static DEFINE_MUTEX(pdev_list_mutex); static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return sprintf(buf, "%s\n", DRVNAME); } static ssize_t show_label(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); struct temp_data *tdata = pdata->core_data[attr->index]; if (tdata->is_pkg_data) return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id); return sprintf(buf, "Core %u\n", tdata->cpu_core_id); } static ssize_t show_crit_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { u32 eax, edx; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); struct temp_data *tdata = pdata->core_data[attr->index]; rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); return sprintf(buf, "%d\n", (eax >> 5) & 1); } static ssize_t show_tjmax(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tjmax); } static ssize_t show_ttarget(struct device *dev, struct device_attribute *devattr, char *buf) { struct 
sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); } static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { u32 eax, edx; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct platform_data *pdata = dev_get_drvdata(dev); struct temp_data *tdata = pdata->core_data[attr->index]; mutex_lock(&tdata->update_lock); /* Check whether the time interval has elapsed */ if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) { rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); tdata->valid = 0; /* Check whether the data is valid */ if (eax & 0x80000000) { tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000; tdata->valid = 1; } tdata->last_updated = jiffies; } mutex_unlock(&tdata->update_lock); return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN; } static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) { /* The 100C is default for both mobile and non mobile CPUs */ int tjmax = 100000; int tjmax_ee = 85000; int usemsr_ee = 1; int err; u32 eax, edx; struct pci_dev *host_bridge; /* Early chips have no MSR for TjMax */ if (c->x86_model == 0xf && c->x86_mask < 4) usemsr_ee = 0; /* Atom CPUs */ if (c->x86_model == 0x1c) { usemsr_ee = 0; host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL && (host_bridge->device == 0xa000 /* NM10 based nettop */ || host_bridge->device == 0xa010)) /* NM10 based netbook */ tjmax = 100000; else tjmax = 90000; pci_dev_put(host_bridge); } if (c->x86_model > 0xe && usemsr_ee) { u8 platform_id; /* * Now we can detect the mobile CPU using Intel provided table * http://softwarecommunity.intel.com/Wiki/Mobility/720.htm * For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU */ err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx); if (err) { 
dev_warn(dev, "Unable to access MSR 0x17, assuming desktop" " CPU\n"); usemsr_ee = 0; } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) { /* * Trust bit 28 up to Penryn, I could not find any * documentation on that; if you happen to know * someone at Intel please ask */ usemsr_ee = 0; } else { /* Platform ID bits 52:50 (EDX starts at bit 32) */ platform_id = (edx >> 18) & 0x7; /* * Mobile Penryn CPU seems to be platform ID 7 or 5 * (guesswork) */ if (c->x86_model == 0x17 && (platform_id == 5 || platform_id == 7)) { /* * If MSR EE bit is set, set it to 90 degrees C, * otherwise 105 degrees C */ tjmax_ee = 90000; tjmax = 105000; } } } if (usemsr_ee) { err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx); if (err) { dev_warn(dev, "Unable to access MSR 0xEE, for Tjmax, left" " at default\n"); } else if (eax & 0x40000000) { tjmax = tjmax_ee; } } else if (tjmax == 100000) { /* * If we don't use msr EE it means we are desktop CPU * (with exeception of Atom) */ dev_warn(dev, "Using relative temperature scale!\n"); } return tjmax; } static int __cpuinit get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) { int err; u32 eax, edx; u32 val; /* * A new feature of current Intel(R) processors, the * IA32_TEMPERATURE_TARGET contains the TjMax value */ err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); if (err) { if (c->x86_model > 0xe && c->x86_model != 0x1c) dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); } else { val = (eax >> 16) & 0xff; /* * If the TjMax is not plausible, an assumption * will be used */ if (val) { dev_dbg(dev, "TjMax is %d degrees C\n", val); return val * 1000; } } if (force_tjmax) { dev_notice(dev, "TjMax forced to %d degrees C by user\n", force_tjmax); return force_tjmax * 1000; } /* * An assumption is made for early CPUs and unreadable MSR. * NOTE: the calculated value may not be correct. 
*/ return adjust_tjmax(c, id, dev); } static int __devinit create_name_attr(struct platform_data *pdata, struct device *dev) { sysfs_attr_init(&pdata->name_attr.attr); pdata->name_attr.attr.name = "name"; pdata->name_attr.attr.mode = S_IRUGO; pdata->name_attr.show = show_name; return device_create_file(dev, &pdata->name_attr); } static int __cpuinit create_core_attrs(struct temp_data *tdata, struct device *dev, int attr_no) { int err, i; static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, struct device_attribute *devattr, char *buf) = { show_label, show_crit_alarm, show_temp, show_tjmax, show_ttarget }; static const char *const names[TOTAL_ATTRS] = { "temp%d_label", "temp%d_crit_alarm", "temp%d_input", "temp%d_crit", "temp%d_max" }; for (i = 0; i < tdata->attr_size; i++) { snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], attr_no); sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; tdata->sd_attrs[i].index = attr_no; err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); if (err) goto exit_free; } return 0; exit_free: while (--i >= 0) device_remove_file(dev, &tdata->sd_attrs[i].dev_attr); return err; } static int __cpuinit chk_ucode_version(unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); /* * Check if we have problem with errata AE18 of Core processors: * Readings might stop update when processor visited too deep sleep, * fixed for stepping D0 (6EC). 
*/ if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { pr_err("Errata AE18 not fixed, update BIOS or " "microcode of the CPU!\n"); return -ENODEV; } return 0; } static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu) { u16 phys_proc_id = TO_PHYS_ID(cpu); struct pdev_entry *p; mutex_lock(&pdev_list_mutex); list_for_each_entry(p, &pdev_list, list) if (p->phys_proc_id == phys_proc_id) { mutex_unlock(&pdev_list_mutex); return p->pdev; } mutex_unlock(&pdev_list_mutex); return NULL; } static struct temp_data __cpuinit *init_temp_data(unsigned int cpu, int pkg_flag) { struct temp_data *tdata; tdata = kzalloc(sizeof(struct temp_data), GFP_KERNEL); if (!tdata) return NULL; tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS; tdata->is_pkg_data = pkg_flag; tdata->cpu = cpu; tdata->cpu_core_id = TO_CORE_ID(cpu); tdata->attr_size = MAX_CORE_ATTRS; mutex_init(&tdata->update_lock); return tdata; } static int __cpuinit create_core_data(struct platform_device *pdev, unsigned int cpu, int pkg_flag) { struct temp_data *tdata; struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; int err, attr_no; /* * Find attr number for sysfs: * We map the attr number to core id of the CPU * The attr number is always core id + 2 * The Pkgtemp will always show up as temp1_*, if available */ attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu); if (attr_no > MAX_CORE_DATA - 1) return -ERANGE; /* * Provide a single set of attributes for all HT siblings of a core * to avoid duplicate sensors (the processor ID and core ID of all * HT siblings of a core are the same). * Skip if a HT sibling of this core is already registered. * This is not an error. 
*/ if (pdata->core_data[attr_no] != NULL) return 0; tdata = init_temp_data(cpu, pkg_flag); if (!tdata) return -ENOMEM; /* Test if we can access the status register */ err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx); if (err) goto exit_free; /* We can access status register. Get Critical Temperature */ tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); /* * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. * The target temperature is available on older CPUs but not in this * register. Atoms don't have the register at all. */ if (c->x86_model > 0xe && c->x86_model != 0x1c) { err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); if (!err) { tdata->ttarget = tdata->tjmax - ((eax >> 8) & 0xff) * 1000; tdata->attr_size++; } } pdata->core_data[attr_no] = tdata; /* Create sysfs interfaces */ err = create_core_attrs(tdata, &pdev->dev, attr_no); if (err) goto exit_free; return 0; exit_free: pdata->core_data[attr_no] = NULL; kfree(tdata); return err; } static void __cpuinit coretemp_add_core(unsigned int cpu, int pkg_flag) { struct platform_device *pdev = coretemp_get_pdev(cpu); int err; if (!pdev) return; err = create_core_data(pdev, cpu, pkg_flag); if (err) dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); } static void coretemp_remove_core(struct platform_data *pdata, struct device *dev, int indx) { int i; struct temp_data *tdata = pdata->core_data[indx]; /* Remove the sysfs attributes */ for (i = 0; i < tdata->attr_size; i++) device_remove_file(dev, &tdata->sd_attrs[i].dev_attr); kfree(pdata->core_data[indx]); pdata->core_data[indx] = NULL; } static int __devinit coretemp_probe(struct platform_device *pdev) { struct platform_data *pdata; int err; /* Initialize the per-package data structures */ pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); if (!pdata) return -ENOMEM; err = create_name_attr(pdata, &pdev->dev); if (err) goto exit_free; pdata->phys_proc_id = pdev->id; platform_set_drvdata(pdev, pdata); pdata->hwmon_dev 
= hwmon_device_register(&pdev->dev); if (IS_ERR(pdata->hwmon_dev)) { err = PTR_ERR(pdata->hwmon_dev); dev_err(&pdev->dev, "Class registration failed (%d)\n", err); goto exit_name; } return 0; exit_name: device_remove_file(&pdev->dev, &pdata->name_attr); platform_set_drvdata(pdev, NULL); exit_free: kfree(pdata); return err; } static int __devexit coretemp_remove(struct platform_device *pdev) { struct platform_data *pdata = platform_get_drvdata(pdev); int i; for (i = MAX_CORE_DATA - 1; i >= 0; --i) if (pdata->core_data[i]) coretemp_remove_core(pdata, &pdev->dev, i); device_remove_file(&pdev->dev, &pdata->name_attr); hwmon_device_unregister(pdata->hwmon_dev); platform_set_drvdata(pdev, NULL); kfree(pdata); return 0; } static struct platform_driver coretemp_driver = { .driver = { .owner = THIS_MODULE, .name = DRVNAME, }, .probe = coretemp_probe, .remove = __devexit_p(coretemp_remove), }; static int __cpuinit coretemp_device_add(unsigned int cpu) { int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; mutex_lock(&pdev_list_mutex); pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu)); if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit; } pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); if (!pdev_entry) { err = -ENOMEM; goto exit_device_put; } err = platform_device_add(pdev); if (err) { pr_err("Device addition failed (%d)\n", err); goto exit_device_free; } pdev_entry->pdev = pdev; pdev_entry->phys_proc_id = pdev->id; list_add_tail(&pdev_entry->list, &pdev_list); mutex_unlock(&pdev_list_mutex); return 0; exit_device_free: kfree(pdev_entry); exit_device_put: platform_device_put(pdev); exit: mutex_unlock(&pdev_list_mutex); return err; } static void __cpuinit coretemp_device_remove(unsigned int cpu) { struct pdev_entry *p, *n; u16 phys_proc_id = TO_PHYS_ID(cpu); mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { if (p->phys_proc_id != phys_proc_id) continue; 
platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); } static bool __cpuinit is_any_core_online(struct platform_data *pdata) { int i; /* Find online cores, except pkgtemp data */ for (i = MAX_CORE_DATA - 1; i >= 0; --i) { if (pdata->core_data[i] && !pdata->core_data[i]->is_pkg_data) { return true; } } return false; } static void __cpuinit get_core_online(unsigned int cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); struct platform_device *pdev = coretemp_get_pdev(cpu); int err; /* * CPUID.06H.EAX[0] indicates whether the CPU has thermal * sensors. We check this bit only, all the early CPUs * without thermal sensors will be filtered out. */ if (!cpu_has(c, X86_FEATURE_DTS)) return; if (!pdev) { /* Check the microcode version of the CPU */ if (chk_ucode_version(cpu)) return; /* * Alright, we have DTS support. * We are bringing the _first_ core in this pkg * online. So, initialize per-pkg data structures and * then bring this core online. */ err = coretemp_device_add(cpu); if (err) return; /* * Check whether pkgtemp support is available. * If so, add interfaces for pkgtemp. */ if (cpu_has(c, X86_FEATURE_PTS)) coretemp_add_core(cpu, 1); } /* * Physical CPU device already exists. * So, just add interfaces for this core. */ coretemp_add_core(cpu, 0); } static void __cpuinit put_core_offline(unsigned int cpu) { int i, indx; struct platform_data *pdata; struct platform_device *pdev = coretemp_get_pdev(cpu); /* If the physical CPU device does not exist, just return */ if (!pdev) return; pdata = platform_get_drvdata(pdev); indx = TO_ATTR_NO(cpu); /* The core id is too big, just return */ if (indx > MAX_CORE_DATA - 1) return; if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu) coretemp_remove_core(pdata, &pdev->dev, indx); /* * If a HT sibling of a core is taken offline, but another HT sibling * of the same core is still online, register the alternate sibling. 
* This ensures that exactly one set of attributes is provided as long * as at least one HT sibling of a core is online. */ for_each_sibling(i, cpu) { if (i != cpu) { get_core_online(i); /* * Display temperature sensor data for one HT sibling * per core only, so abort the loop after one such * sibling has been found. */ break; } } /* * If all cores in this pkg are offline, remove the device. * coretemp_device_remove calls unregister_platform_device, * which in turn calls coretemp_remove. This removes the * pkgtemp entry and does other clean ups. */ if (!is_any_core_online(pdata)) coretemp_device_remove(cpu); } static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long) hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: get_core_online(cpu); break; case CPU_DOWN_PREPARE: put_core_offline(cpu); break; } return NOTIFY_OK; } static struct notifier_block coretemp_cpu_notifier __refdata = { .notifier_call = coretemp_cpu_callback, }; static const struct x86_cpu_id coretemp_ids[] = { { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS }, {} }; MODULE_DEVICE_TABLE(x86cpu, coretemp_ids); static int __init coretemp_init(void) { int i, err = -ENODEV; /* * CPUID.06H.EAX[0] indicates whether the CPU has thermal * sensors. We check this bit only, all the early CPUs * without thermal sensors will be filtered out. 
*/ if (!x86_match_cpu(coretemp_ids)) return -ENODEV; err = platform_driver_register(&coretemp_driver); if (err) goto exit; for_each_online_cpu(i) get_core_online(i); #ifndef CONFIG_HOTPLUG_CPU if (list_empty(&pdev_list)) { err = -ENODEV; goto exit_driver_unreg; } #endif register_hotcpu_notifier(&coretemp_cpu_notifier); return 0; #ifndef CONFIG_HOTPLUG_CPU exit_driver_unreg: platform_driver_unregister(&coretemp_driver); #endif exit: return err; } static void __exit coretemp_exit(void) { struct pdev_entry *p, *n; unregister_hotcpu_notifier(&coretemp_cpu_notifier); mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { platform_device_unregister(p->pdev); list_del(&p->list); kfree(p); } mutex_unlock(&pdev_list_mutex); platform_driver_unregister(&coretemp_driver); } MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>"); MODULE_DESCRIPTION("Intel Core temperature monitor"); MODULE_LICENSE("GPL"); module_init(coretemp_init) module_exit(coretemp_exit)
gpl-2.0
robreardon/android_kernel_motorola_olympus
drivers/net/stmmac/norm_desc.c
2919
5809
/******************************************************************************* This contains the functions to handle the normal descriptors. Copyright (C) 2007-2009 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include "common.h" static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { int ret = 0; struct net_device_stats *stats = (struct net_device_stats *)data; if (unlikely(p->des01.tx.error_summary)) { if (unlikely(p->des01.tx.underflow_error)) { x->tx_underflow++; stats->tx_fifo_errors++; } if (unlikely(p->des01.tx.no_carrier)) { x->tx_carrier++; stats->tx_carrier_errors++; } if (unlikely(p->des01.tx.loss_carrier)) { x->tx_losscarrier++; stats->tx_carrier_errors++; } if (unlikely((p->des01.tx.excessive_deferral) || (p->des01.tx.excessive_collisions) || (p->des01.tx.late_collision))) stats->collisions += p->des01.tx.collision_count; ret = -1; } if (unlikely(p->des01.tx.heartbeat_fail)) { x->tx_heartbeat++; stats->tx_heartbeat_errors++; ret = -1; } if (unlikely(p->des01.tx.deferred)) x->tx_deferred++; return ret; } static int ndesc_get_tx_len(struct dma_desc *p) { return 
p->des01.tx.buffer1_size; } /* This function verifies if each incoming frame has some errors * and, if required, updates the multicast statistics. * In case of success, it returns csum_none because the device * is not able to compute the csum in HW. */ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p) { int ret = csum_none; struct net_device_stats *stats = (struct net_device_stats *)data; if (unlikely(p->des01.rx.last_descriptor == 0)) { pr_warning("ndesc Error: Oversized Ethernet " "frame spanned multiple buffers\n"); stats->rx_length_errors++; return discard_frame; } if (unlikely(p->des01.rx.error_summary)) { if (unlikely(p->des01.rx.descriptor_error)) x->rx_desc++; if (unlikely(p->des01.rx.partial_frame_error)) x->rx_partial++; if (unlikely(p->des01.rx.run_frame)) x->rx_runt++; if (unlikely(p->des01.rx.frame_too_long)) x->rx_toolong++; if (unlikely(p->des01.rx.collision)) { x->rx_collision++; stats->collisions++; } if (unlikely(p->des01.rx.crc_error)) { x->rx_crc++; stats->rx_crc_errors++; } ret = discard_frame; } if (unlikely(p->des01.rx.dribbling)) ret = discard_frame; if (unlikely(p->des01.rx.length_error)) { x->rx_length++; ret = discard_frame; } if (unlikely(p->des01.rx.mii_error)) { x->rx_mii++; ret = discard_frame; } if (p->des01.rx.multicast_frame) { x->rx_multicast++; stats->multicast++; } return ret; } static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, int disable_rx_ic) { int i; for (i = 0; i < ring_size; i++) { p->des01.rx.own = 1; p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; if (i == ring_size - 1) p->des01.rx.end_ring = 1; if (disable_rx_ic) p->des01.rx.disable_ic = 1; p++; } } static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) { int i; for (i = 0; i < ring_size; i++) { p->des01.tx.own = 0; if (i == ring_size - 1) p->des01.tx.end_ring = 1; p++; } } static int ndesc_get_tx_owner(struct dma_desc *p) { return p->des01.tx.own; } static int 
ndesc_get_rx_owner(struct dma_desc *p) { return p->des01.rx.own; } static void ndesc_set_tx_owner(struct dma_desc *p) { p->des01.tx.own = 1; } static void ndesc_set_rx_owner(struct dma_desc *p) { p->des01.rx.own = 1; } static int ndesc_get_tx_ls(struct dma_desc *p) { return p->des01.tx.last_segment; } static void ndesc_release_tx_desc(struct dma_desc *p) { int ter = p->des01.tx.end_ring; memset(p, 0, offsetof(struct dma_desc, des2)); /* set termination field */ p->des01.tx.end_ring = ter; } static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, int csum_flag) { p->des01.tx.first_segment = is_fs; p->des01.tx.buffer1_size = len; } static void ndesc_clear_tx_ic(struct dma_desc *p) { p->des01.tx.interrupt = 0; } static void ndesc_close_tx_desc(struct dma_desc *p) { p->des01.tx.last_segment = 1; p->des01.tx.interrupt = 1; } static int ndesc_get_rx_frame_len(struct dma_desc *p) { return p->des01.rx.frame_length; } const struct stmmac_desc_ops ndesc_ops = { .tx_status = ndesc_get_tx_status, .rx_status = ndesc_get_rx_status, .get_tx_len = ndesc_get_tx_len, .init_rx_desc = ndesc_init_rx_desc, .init_tx_desc = ndesc_init_tx_desc, .get_tx_owner = ndesc_get_tx_owner, .get_rx_owner = ndesc_get_rx_owner, .release_tx_desc = ndesc_release_tx_desc, .prepare_tx_desc = ndesc_prepare_tx_desc, .clear_tx_ic = ndesc_clear_tx_ic, .close_tx_desc = ndesc_close_tx_desc, .get_tx_ls = ndesc_get_tx_ls, .set_tx_owner = ndesc_set_tx_owner, .set_rx_owner = ndesc_set_rx_owner, .get_rx_frame_len = ndesc_get_rx_frame_len, };
gpl-2.0
jyunyen/Nexus7_Kernal
arch/arm/mach-omap1/lcd_dma.c
3175
10920
/* * linux/arch/arm/mach-omap1/lcd_dma.c * * Extracted from arch/arm/plat-omap/dma.c * Copyright (C) 2003 - 2008 Nokia Corporation * Author: Juha Yrjölä <juha.yrjola@nokia.com> * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> * Graphics DMA and LCD DMA graphics tranformations * by Imre Deak <imre.deak@nokia.com> * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc. * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com> * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. * * Copyright (C) 2009 Texas Instruments * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> * * Support functions for the OMAP internal DMA channels. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/lcdc.h> #include <plat/dma.h> int omap_lcd_dma_running(void) { /* * On OMAP1510, internal LCD controller will start the transfer * when it gets enabled, so assume DMA running if LCD enabled. 
*/ if (cpu_is_omap15xx()) if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN) return 1; /* Check if LCD DMA is running */ if (cpu_is_omap16xx()) if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN) return 1; return 0; } static struct lcd_dma_info { spinlock_t lock; int reserved; void (*callback)(u16 status, void *data); void *cb_data; int active; unsigned long addr, size; int rotate, data_type, xres, yres; int vxres; int mirror; int xscale, yscale; int ext_ctrl; int src_port; int single_transfer; } lcd_dma; void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres, int data_type) { lcd_dma.addr = addr; lcd_dma.data_type = data_type; lcd_dma.xres = fb_xres; lcd_dma.yres = fb_yres; } EXPORT_SYMBOL(omap_set_lcd_dma_b1); void omap_set_lcd_dma_src_port(int port) { lcd_dma.src_port = port; } void omap_set_lcd_dma_ext_controller(int external) { lcd_dma.ext_ctrl = external; } EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller); void omap_set_lcd_dma_single_transfer(int single) { lcd_dma.single_transfer = single; } EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer); void omap_set_lcd_dma_b1_rotation(int rotate) { if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n"); BUG(); return; } lcd_dma.rotate = rotate; } EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation); void omap_set_lcd_dma_b1_mirror(int mirror) { if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n"); BUG(); } lcd_dma.mirror = mirror; } EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror); void omap_set_lcd_dma_b1_vxres(unsigned long vxres) { if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA virtual resulotion is not supported " "in 1510 mode\n"); BUG(); } lcd_dma.vxres = vxres; } EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres); void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale) { if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); BUG(); } lcd_dma.xscale = xscale; lcd_dma.yscale = yscale; } 
EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale); static void set_b1_regs(void) { unsigned long top, bottom; int es; u16 w; unsigned long en, fn; long ei, fi; unsigned long vxres; unsigned int xscale, yscale; switch (lcd_dma.data_type) { case OMAP_DMA_DATA_TYPE_S8: es = 1; break; case OMAP_DMA_DATA_TYPE_S16: es = 2; break; case OMAP_DMA_DATA_TYPE_S32: es = 4; break; default: BUG(); return; } vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres; xscale = lcd_dma.xscale ? lcd_dma.xscale : 1; yscale = lcd_dma.yscale ? lcd_dma.yscale : 1; BUG_ON(vxres < lcd_dma.xres); #define PIXADDR(x, y) (lcd_dma.addr + \ ((y) * vxres * yscale + (x) * xscale) * es) #define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1) switch (lcd_dma.rotate) { case 0: if (!lcd_dma.mirror) { top = PIXADDR(0, 0); bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); /* 1510 DMA requires the bottom address to be 2 more * than the actual last memory access location. */ if (cpu_is_omap15xx() && lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) bottom += 2; ei = PIXSTEP(0, 0, 1, 0); fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1); } else { top = PIXADDR(lcd_dma.xres - 1, 0); bottom = PIXADDR(0, lcd_dma.yres - 1); ei = PIXSTEP(1, 0, 0, 0); fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1); } en = lcd_dma.xres; fn = lcd_dma.yres; break; case 90: if (!lcd_dma.mirror) { top = PIXADDR(0, lcd_dma.yres - 1); bottom = PIXADDR(lcd_dma.xres - 1, 0); ei = PIXSTEP(0, 1, 0, 0); fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1); } else { top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); bottom = PIXADDR(0, 0); ei = PIXSTEP(0, 1, 0, 0); fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1); } en = lcd_dma.yres; fn = lcd_dma.xres; break; case 180: if (!lcd_dma.mirror) { top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); bottom = PIXADDR(0, 0); ei = PIXSTEP(1, 0, 0, 0); fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0); } else { top = PIXADDR(0, lcd_dma.yres - 1); bottom = PIXADDR(lcd_dma.xres - 1, 0); ei = PIXSTEP(0, 0, 1, 0); fi = PIXSTEP(lcd_dma.xres - 1, 1, 
0, 0); } en = lcd_dma.xres; fn = lcd_dma.yres; break; case 270: if (!lcd_dma.mirror) { top = PIXADDR(lcd_dma.xres - 1, 0); bottom = PIXADDR(0, lcd_dma.yres - 1); ei = PIXSTEP(0, 0, 0, 1); fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0); } else { top = PIXADDR(0, 0); bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); ei = PIXSTEP(0, 0, 0, 1); fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0); } en = lcd_dma.yres; fn = lcd_dma.xres; break; default: BUG(); return; /* Suppress warning about uninitialized vars */ } if (cpu_is_omap15xx()) { omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L); return; } /* 1610 regs */ omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U); omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L); omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U); omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L); omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1); omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1); w = omap_readw(OMAP1610_DMA_LCD_CSDP); w &= ~0x03; w |= lcd_dma.data_type; omap_writew(w, OMAP1610_DMA_LCD_CSDP); w = omap_readw(OMAP1610_DMA_LCD_CTRL); /* Always set the source port as SDRAM for now*/ w &= ~(0x03 << 6); if (lcd_dma.callback != NULL) w |= 1 << 1; /* Block interrupt enable */ else w &= ~(1 << 1); omap_writew(w, OMAP1610_DMA_LCD_CTRL); if (!(lcd_dma.rotate || lcd_dma.mirror || lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale)) return; w = omap_readw(OMAP1610_DMA_LCD_CCR); /* Set the double-indexed addressing mode */ w |= (0x03 << 12); omap_writew(w, OMAP1610_DMA_LCD_CCR); omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1); omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U); omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L); } static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id) { u16 w; w = omap_readw(OMAP1610_DMA_LCD_CTRL); if (unlikely(!(w & (1 << 3)))) { printk(KERN_WARNING "Spurious LCD DMA IRQ\n"); return IRQ_NONE; } /* Ack the 
IRQ */ w |= (1 << 3); omap_writew(w, OMAP1610_DMA_LCD_CTRL); lcd_dma.active = 0; if (lcd_dma.callback != NULL) lcd_dma.callback(w, lcd_dma.cb_data); return IRQ_HANDLED; } int omap_request_lcd_dma(void (*callback)(u16 status, void *data), void *data) { spin_lock_irq(&lcd_dma.lock); if (lcd_dma.reserved) { spin_unlock_irq(&lcd_dma.lock); printk(KERN_ERR "LCD DMA channel already reserved\n"); BUG(); return -EBUSY; } lcd_dma.reserved = 1; spin_unlock_irq(&lcd_dma.lock); lcd_dma.callback = callback; lcd_dma.cb_data = data; lcd_dma.active = 0; lcd_dma.single_transfer = 0; lcd_dma.rotate = 0; lcd_dma.vxres = 0; lcd_dma.mirror = 0; lcd_dma.xscale = 0; lcd_dma.yscale = 0; lcd_dma.ext_ctrl = 0; lcd_dma.src_port = 0; return 0; } EXPORT_SYMBOL(omap_request_lcd_dma); void omap_free_lcd_dma(void) { spin_lock(&lcd_dma.lock); if (!lcd_dma.reserved) { spin_unlock(&lcd_dma.lock); printk(KERN_ERR "LCD DMA is not reserved\n"); BUG(); return; } if (!cpu_is_omap15xx()) omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR); lcd_dma.reserved = 0; spin_unlock(&lcd_dma.lock); } EXPORT_SYMBOL(omap_free_lcd_dma); void omap_enable_lcd_dma(void) { u16 w; /* * Set the Enable bit only if an external controller is * connected. Otherwise the OMAP internal controller will * start the transfer when it gets enabled. 
*/ if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CTRL); w |= 1 << 8; omap_writew(w, OMAP1610_DMA_LCD_CTRL); lcd_dma.active = 1; w = omap_readw(OMAP1610_DMA_LCD_CCR); w |= 1 << 7; omap_writew(w, OMAP1610_DMA_LCD_CCR); } EXPORT_SYMBOL(omap_enable_lcd_dma); void omap_setup_lcd_dma(void) { BUG_ON(lcd_dma.active); if (!cpu_is_omap15xx()) { /* Set some reasonable defaults */ omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); } set_b1_regs(); if (!cpu_is_omap15xx()) { u16 w; w = omap_readw(OMAP1610_DMA_LCD_CCR); /* * If DMA was already active set the end_prog bit to have * the programmed register set loaded into the active * register set. */ w |= 1 << 11; /* End_prog */ if (!lcd_dma.single_transfer) w |= (3 << 8); /* Auto_init, repeat */ omap_writew(w, OMAP1610_DMA_LCD_CCR); } } EXPORT_SYMBOL(omap_setup_lcd_dma); void omap_stop_lcd_dma(void) { u16 w; lcd_dma.active = 0; if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CCR); w &= ~(1 << 7); omap_writew(w, OMAP1610_DMA_LCD_CCR); w = omap_readw(OMAP1610_DMA_LCD_CTRL); w &= ~(1 << 8); omap_writew(w, OMAP1610_DMA_LCD_CTRL); } EXPORT_SYMBOL(omap_stop_lcd_dma); static int __init omap_init_lcd_dma(void) { int r; if (!cpu_class_is_omap1()) return -ENODEV; if (cpu_is_omap16xx()) { u16 w; /* this would prevent OMAP sleep */ w = omap_readw(OMAP1610_DMA_LCD_CTRL); w &= ~(1 << 8); omap_writew(w, OMAP1610_DMA_LCD_CTRL); } spin_lock_init(&lcd_dma.lock); r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, "LCD DMA", NULL); if (r != 0) printk(KERN_ERR "unable to request IRQ for LCD DMA " "(error %d)\n", r); return r; } arch_initcall(omap_init_lcd_dma);
gpl-2.0
munjeni/stock_jb_kexec_kernel_for_locked_bootloader
arch/arm/mach-omap1/lcd_dma.c
3175
10920
/* * linux/arch/arm/mach-omap1/lcd_dma.c * * Extracted from arch/arm/plat-omap/dma.c * Copyright (C) 2003 - 2008 Nokia Corporation * Author: Juha Yrjölä <juha.yrjola@nokia.com> * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> * Graphics DMA and LCD DMA graphics tranformations * by Imre Deak <imre.deak@nokia.com> * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc. * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com> * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. * * Copyright (C) 2009 Texas Instruments * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> * * Support functions for the OMAP internal DMA channels. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/io.h> #include <mach/hardware.h> #include <mach/lcdc.h> #include <plat/dma.h> int omap_lcd_dma_running(void) { /* * On OMAP1510, internal LCD controller will start the transfer * when it gets enabled, so assume DMA running if LCD enabled. 
*/ if (cpu_is_omap15xx()) if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN) return 1; /* Check if LCD DMA is running */ if (cpu_is_omap16xx()) if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN) return 1; return 0; } static struct lcd_dma_info { spinlock_t lock; int reserved; void (*callback)(u16 status, void *data); void *cb_data; int active; unsigned long addr, size; int rotate, data_type, xres, yres; int vxres; int mirror; int xscale, yscale; int ext_ctrl; int src_port; int single_transfer; } lcd_dma; void omap_set_lcd_dma_b1(unsigned long addr, u16 fb_xres, u16 fb_yres, int data_type) { lcd_dma.addr = addr; lcd_dma.data_type = data_type; lcd_dma.xres = fb_xres; lcd_dma.yres = fb_yres; } EXPORT_SYMBOL(omap_set_lcd_dma_b1); void omap_set_lcd_dma_src_port(int port) { lcd_dma.src_port = port; } void omap_set_lcd_dma_ext_controller(int external) { lcd_dma.ext_ctrl = external; } EXPORT_SYMBOL(omap_set_lcd_dma_ext_controller); void omap_set_lcd_dma_single_transfer(int single) { lcd_dma.single_transfer = single; } EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer); void omap_set_lcd_dma_b1_rotation(int rotate) { if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n"); BUG(); return; } lcd_dma.rotate = rotate; } EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation); void omap_set_lcd_dma_b1_mirror(int mirror) { if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n"); BUG(); } lcd_dma.mirror = mirror; } EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror); void omap_set_lcd_dma_b1_vxres(unsigned long vxres) { if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA virtual resulotion is not supported " "in 1510 mode\n"); BUG(); } lcd_dma.vxres = vxres; } EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres); void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale) { if (cpu_is_omap15xx()) { printk(KERN_ERR "DMA scale is not supported in 1510 mode\n"); BUG(); } lcd_dma.xscale = xscale; lcd_dma.yscale = yscale; } 
EXPORT_SYMBOL(omap_set_lcd_dma_b1_scale); static void set_b1_regs(void) { unsigned long top, bottom; int es; u16 w; unsigned long en, fn; long ei, fi; unsigned long vxres; unsigned int xscale, yscale; switch (lcd_dma.data_type) { case OMAP_DMA_DATA_TYPE_S8: es = 1; break; case OMAP_DMA_DATA_TYPE_S16: es = 2; break; case OMAP_DMA_DATA_TYPE_S32: es = 4; break; default: BUG(); return; } vxres = lcd_dma.vxres ? lcd_dma.vxres : lcd_dma.xres; xscale = lcd_dma.xscale ? lcd_dma.xscale : 1; yscale = lcd_dma.yscale ? lcd_dma.yscale : 1; BUG_ON(vxres < lcd_dma.xres); #define PIXADDR(x, y) (lcd_dma.addr + \ ((y) * vxres * yscale + (x) * xscale) * es) #define PIXSTEP(sx, sy, dx, dy) (PIXADDR(dx, dy) - PIXADDR(sx, sy) - es + 1) switch (lcd_dma.rotate) { case 0: if (!lcd_dma.mirror) { top = PIXADDR(0, 0); bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); /* 1510 DMA requires the bottom address to be 2 more * than the actual last memory access location. */ if (cpu_is_omap15xx() && lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32) bottom += 2; ei = PIXSTEP(0, 0, 1, 0); fi = PIXSTEP(lcd_dma.xres - 1, 0, 0, 1); } else { top = PIXADDR(lcd_dma.xres - 1, 0); bottom = PIXADDR(0, lcd_dma.yres - 1); ei = PIXSTEP(1, 0, 0, 0); fi = PIXSTEP(0, 0, lcd_dma.xres - 1, 1); } en = lcd_dma.xres; fn = lcd_dma.yres; break; case 90: if (!lcd_dma.mirror) { top = PIXADDR(0, lcd_dma.yres - 1); bottom = PIXADDR(lcd_dma.xres - 1, 0); ei = PIXSTEP(0, 1, 0, 0); fi = PIXSTEP(0, 0, 1, lcd_dma.yres - 1); } else { top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); bottom = PIXADDR(0, 0); ei = PIXSTEP(0, 1, 0, 0); fi = PIXSTEP(1, 0, 0, lcd_dma.yres - 1); } en = lcd_dma.yres; fn = lcd_dma.xres; break; case 180: if (!lcd_dma.mirror) { top = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); bottom = PIXADDR(0, 0); ei = PIXSTEP(1, 0, 0, 0); fi = PIXSTEP(0, 1, lcd_dma.xres - 1, 0); } else { top = PIXADDR(0, lcd_dma.yres - 1); bottom = PIXADDR(lcd_dma.xres - 1, 0); ei = PIXSTEP(0, 0, 1, 0); fi = PIXSTEP(lcd_dma.xres - 1, 1, 
0, 0); } en = lcd_dma.xres; fn = lcd_dma.yres; break; case 270: if (!lcd_dma.mirror) { top = PIXADDR(lcd_dma.xres - 1, 0); bottom = PIXADDR(0, lcd_dma.yres - 1); ei = PIXSTEP(0, 0, 0, 1); fi = PIXSTEP(1, lcd_dma.yres - 1, 0, 0); } else { top = PIXADDR(0, 0); bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1); ei = PIXSTEP(0, 0, 0, 1); fi = PIXSTEP(0, lcd_dma.yres - 1, 1, 0); } en = lcd_dma.yres; fn = lcd_dma.xres; break; default: BUG(); return; /* Suppress warning about uninitialized vars */ } if (cpu_is_omap15xx()) { omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U); omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L); omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U); omap_writew(bottom, OMAP1510_DMA_LCD_BOT_F1_L); return; } /* 1610 regs */ omap_writew(top >> 16, OMAP1610_DMA_LCD_TOP_B1_U); omap_writew(top, OMAP1610_DMA_LCD_TOP_B1_L); omap_writew(bottom >> 16, OMAP1610_DMA_LCD_BOT_B1_U); omap_writew(bottom, OMAP1610_DMA_LCD_BOT_B1_L); omap_writew(en, OMAP1610_DMA_LCD_SRC_EN_B1); omap_writew(fn, OMAP1610_DMA_LCD_SRC_FN_B1); w = omap_readw(OMAP1610_DMA_LCD_CSDP); w &= ~0x03; w |= lcd_dma.data_type; omap_writew(w, OMAP1610_DMA_LCD_CSDP); w = omap_readw(OMAP1610_DMA_LCD_CTRL); /* Always set the source port as SDRAM for now*/ w &= ~(0x03 << 6); if (lcd_dma.callback != NULL) w |= 1 << 1; /* Block interrupt enable */ else w &= ~(1 << 1); omap_writew(w, OMAP1610_DMA_LCD_CTRL); if (!(lcd_dma.rotate || lcd_dma.mirror || lcd_dma.vxres || lcd_dma.xscale || lcd_dma.yscale)) return; w = omap_readw(OMAP1610_DMA_LCD_CCR); /* Set the double-indexed addressing mode */ w |= (0x03 << 12); omap_writew(w, OMAP1610_DMA_LCD_CCR); omap_writew(ei, OMAP1610_DMA_LCD_SRC_EI_B1); omap_writew(fi >> 16, OMAP1610_DMA_LCD_SRC_FI_B1_U); omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L); } static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id) { u16 w; w = omap_readw(OMAP1610_DMA_LCD_CTRL); if (unlikely(!(w & (1 << 3)))) { printk(KERN_WARNING "Spurious LCD DMA IRQ\n"); return IRQ_NONE; } /* Ack the 
IRQ */ w |= (1 << 3); omap_writew(w, OMAP1610_DMA_LCD_CTRL); lcd_dma.active = 0; if (lcd_dma.callback != NULL) lcd_dma.callback(w, lcd_dma.cb_data); return IRQ_HANDLED; } int omap_request_lcd_dma(void (*callback)(u16 status, void *data), void *data) { spin_lock_irq(&lcd_dma.lock); if (lcd_dma.reserved) { spin_unlock_irq(&lcd_dma.lock); printk(KERN_ERR "LCD DMA channel already reserved\n"); BUG(); return -EBUSY; } lcd_dma.reserved = 1; spin_unlock_irq(&lcd_dma.lock); lcd_dma.callback = callback; lcd_dma.cb_data = data; lcd_dma.active = 0; lcd_dma.single_transfer = 0; lcd_dma.rotate = 0; lcd_dma.vxres = 0; lcd_dma.mirror = 0; lcd_dma.xscale = 0; lcd_dma.yscale = 0; lcd_dma.ext_ctrl = 0; lcd_dma.src_port = 0; return 0; } EXPORT_SYMBOL(omap_request_lcd_dma); void omap_free_lcd_dma(void) { spin_lock(&lcd_dma.lock); if (!lcd_dma.reserved) { spin_unlock(&lcd_dma.lock); printk(KERN_ERR "LCD DMA is not reserved\n"); BUG(); return; } if (!cpu_is_omap15xx()) omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR); lcd_dma.reserved = 0; spin_unlock(&lcd_dma.lock); } EXPORT_SYMBOL(omap_free_lcd_dma); void omap_enable_lcd_dma(void) { u16 w; /* * Set the Enable bit only if an external controller is * connected. Otherwise the OMAP internal controller will * start the transfer when it gets enabled. 
*/ if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CTRL); w |= 1 << 8; omap_writew(w, OMAP1610_DMA_LCD_CTRL); lcd_dma.active = 1; w = omap_readw(OMAP1610_DMA_LCD_CCR); w |= 1 << 7; omap_writew(w, OMAP1610_DMA_LCD_CCR); } EXPORT_SYMBOL(omap_enable_lcd_dma); void omap_setup_lcd_dma(void) { BUG_ON(lcd_dma.active); if (!cpu_is_omap15xx()) { /* Set some reasonable defaults */ omap_writew(0x5440, OMAP1610_DMA_LCD_CCR); omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP); omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL); } set_b1_regs(); if (!cpu_is_omap15xx()) { u16 w; w = omap_readw(OMAP1610_DMA_LCD_CCR); /* * If DMA was already active set the end_prog bit to have * the programmed register set loaded into the active * register set. */ w |= 1 << 11; /* End_prog */ if (!lcd_dma.single_transfer) w |= (3 << 8); /* Auto_init, repeat */ omap_writew(w, OMAP1610_DMA_LCD_CCR); } } EXPORT_SYMBOL(omap_setup_lcd_dma); void omap_stop_lcd_dma(void) { u16 w; lcd_dma.active = 0; if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl) return; w = omap_readw(OMAP1610_DMA_LCD_CCR); w &= ~(1 << 7); omap_writew(w, OMAP1610_DMA_LCD_CCR); w = omap_readw(OMAP1610_DMA_LCD_CTRL); w &= ~(1 << 8); omap_writew(w, OMAP1610_DMA_LCD_CTRL); } EXPORT_SYMBOL(omap_stop_lcd_dma); static int __init omap_init_lcd_dma(void) { int r; if (!cpu_class_is_omap1()) return -ENODEV; if (cpu_is_omap16xx()) { u16 w; /* this would prevent OMAP sleep */ w = omap_readw(OMAP1610_DMA_LCD_CTRL); w &= ~(1 << 8); omap_writew(w, OMAP1610_DMA_LCD_CTRL); } spin_lock_init(&lcd_dma.lock); r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, "LCD DMA", NULL); if (r != 0) printk(KERN_ERR "unable to request IRQ for LCD DMA " "(error %d)\n", r); return r; } arch_initcall(omap_init_lcd_dma);
gpl-2.0
jaej-dev/Kernel_Unico
fs/ext2/dir.c
3175
18270
/* * linux/fs/ext2/dir.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/dir.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext2 directory handling functions * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 * * All code that works with directory layout had been switched to pagecache * and moved here. AV */ #include "ext2.h" #include <linux/buffer_head.h> #include <linux/pagemap.h> #include <linux/swap.h> typedef struct ext2_dir_entry_2 ext2_dirent; /* * Tests against MAX_REC_LEN etc were put in place for 64k block * sizes; if that is not possible on this arch, we can skip * those tests and speed things up. */ static inline unsigned ext2_rec_len_from_disk(__le16 dlen) { unsigned len = le16_to_cpu(dlen); #if (PAGE_CACHE_SIZE >= 65536) if (len == EXT2_MAX_REC_LEN) return 1 << 16; #endif return len; } static inline __le16 ext2_rec_len_to_disk(unsigned len) { #if (PAGE_CACHE_SIZE >= 65536) if (len == (1 << 16)) return cpu_to_le16(EXT2_MAX_REC_LEN); else BUG_ON(len > (1 << 16)); #endif return cpu_to_le16(len); } /* * ext2 uses block-sized chunks. Arguably, sector-sized ones would be * more robust, but we have what we have */ static inline unsigned ext2_chunk_size(struct inode *inode) { return inode->i_sb->s_blocksize; } static inline void ext2_put_page(struct page *page) { kunmap(page); page_cache_release(page); } static inline unsigned long dir_pages(struct inode *inode) { return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; } /* * Return the offset into page `page_nr' of the last valid * byte in that page, plus one. 
*/ static unsigned ext2_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = inode->i_size; last_byte -= page_nr << PAGE_CACHE_SHIFT; if (last_byte > PAGE_CACHE_SIZE) last_byte = PAGE_CACHE_SIZE; return last_byte; } static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) { struct address_space *mapping = page->mapping; struct inode *dir = mapping->host; int err = 0; dir->i_version++; block_write_end(NULL, mapping, pos, len, len, page, NULL); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); mark_inode_dirty(dir); } if (IS_DIRSYNC(dir)) { err = write_one_page(page, 1); if (!err) err = sync_inode_metadata(dir, 1); } else { unlock_page(page); } return err; } static void ext2_check_page(struct page *page, int quiet) { struct inode *dir = page->mapping->host; struct super_block *sb = dir->i_sb; unsigned chunk_size = ext2_chunk_size(dir); char *kaddr = page_address(page); u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count); unsigned offs, rec_len; unsigned limit = PAGE_CACHE_SIZE; ext2_dirent *p; char *error; if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { limit = dir->i_size & ~PAGE_CACHE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) goto out; } for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) { p = (ext2_dirent *)(kaddr + offs); rec_len = ext2_rec_len_from_disk(p->rec_len); if (unlikely(rec_len < EXT2_DIR_REC_LEN(1))) goto Eshort; if (unlikely(rec_len & 3)) goto Ealign; if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len))) goto Enamelen; if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))) goto Espan; if (unlikely(le32_to_cpu(p->inode) > max_inumber)) goto Einumber; } if (offs != limit) goto Eend; out: SetPageChecked(page); return; /* Too bad, we had an error */ Ebadsize: if (!quiet) ext2_error(sb, __func__, "size of directory #%lu is not a multiple " "of chunk size", dir->i_ino); goto fail; Eshort: error = "rec_len is smaller than minimal"; goto 
bad_entry; Ealign: error = "unaligned directory entry"; goto bad_entry; Enamelen: error = "rec_len is too small for name_len"; goto bad_entry; Espan: error = "directory entry across blocks"; goto bad_entry; Einumber: error = "inode out of bounds"; bad_entry: if (!quiet) ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - " "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, (unsigned long) le32_to_cpu(p->inode), rec_len, p->name_len); goto fail; Eend: if (!quiet) { p = (ext2_dirent *)(kaddr + offs); ext2_error(sb, "ext2_check_page", "entry in directory #%lu spans the page boundary" "offset=%lu, inode=%lu", dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, (unsigned long) le32_to_cpu(p->inode)); } fail: SetPageChecked(page); SetPageError(page); } static struct page * ext2_get_page(struct inode *dir, unsigned long n, int quiet) { struct address_space *mapping = dir->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { kmap(page); if (!PageChecked(page)) ext2_check_page(page, quiet); if (PageError(page)) goto fail; } return page; fail: ext2_put_page(page); return ERR_PTR(-EIO); } /* * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure. * * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller. 
*/ static inline int ext2_match (int len, const char * const name, struct ext2_dir_entry_2 * de) { if (len != de->name_len) return 0; if (!de->inode) return 0; return !memcmp(name, de->name, len); } /* * p is at least 6 bytes before the end of page */ static inline ext2_dirent *ext2_next_entry(ext2_dirent *p) { return (ext2_dirent *)((char *)p + ext2_rec_len_from_disk(p->rec_len)); } static inline unsigned ext2_validate_entry(char *base, unsigned offset, unsigned mask) { ext2_dirent *de = (ext2_dirent*)(base + offset); ext2_dirent *p = (ext2_dirent*)(base + (offset&mask)); while ((char*)p < (char*)de) { if (p->rec_len == 0) break; p = ext2_next_entry(p); } return (char *)p - base; } static unsigned char ext2_filetype_table[EXT2_FT_MAX] = { [EXT2_FT_UNKNOWN] = DT_UNKNOWN, [EXT2_FT_REG_FILE] = DT_REG, [EXT2_FT_DIR] = DT_DIR, [EXT2_FT_CHRDEV] = DT_CHR, [EXT2_FT_BLKDEV] = DT_BLK, [EXT2_FT_FIFO] = DT_FIFO, [EXT2_FT_SOCK] = DT_SOCK, [EXT2_FT_SYMLINK] = DT_LNK, }; #define S_SHIFT 12 static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFREG >> S_SHIFT] = EXT2_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = EXT2_FT_DIR, [S_IFCHR >> S_SHIFT] = EXT2_FT_CHRDEV, [S_IFBLK >> S_SHIFT] = EXT2_FT_BLKDEV, [S_IFIFO >> S_SHIFT] = EXT2_FT_FIFO, [S_IFSOCK >> S_SHIFT] = EXT2_FT_SOCK, [S_IFLNK >> S_SHIFT] = EXT2_FT_SYMLINK, }; static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode) { mode_t mode = inode->i_mode; if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE)) de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT]; else de->file_type = 0; } static int ext2_readdir (struct file * filp, void * dirent, filldir_t filldir) { loff_t pos = filp->f_pos; struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; unsigned int offset = pos & ~PAGE_CACHE_MASK; unsigned long n = pos >> PAGE_CACHE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(ext2_chunk_size(inode)-1); unsigned char *types = 
NULL; int need_revalidate = filp->f_version != inode->i_version; if (pos > inode->i_size - EXT2_DIR_REC_LEN(1)) return 0; if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE)) types = ext2_filetype_table; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; ext2_dirent *de; struct page *page = ext2_get_page(inode, n, 0); if (IS_ERR(page)) { ext2_error(sb, __func__, "bad page in #%lu", inode->i_ino); filp->f_pos += PAGE_CACHE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ext2_validate_entry(kaddr, offset, chunk_mask); filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset; } filp->f_version = inode->i_version; need_revalidate = 0; } de = (ext2_dirent *)(kaddr+offset); limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1); for ( ;(char*)de <= limit; de = ext2_next_entry(de)) { if (de->rec_len == 0) { ext2_error(sb, __func__, "zero-length directory entry"); ext2_put_page(page); return -EIO; } if (de->inode) { int over; unsigned char d_type = DT_UNKNOWN; if (types && de->file_type < EXT2_FT_MAX) d_type = types[de->file_type]; offset = (char *)de - kaddr; over = filldir(dirent, de->name, de->name_len, (n<<PAGE_CACHE_SHIFT) | offset, le32_to_cpu(de->inode), d_type); if (over) { ext2_put_page(page); return 0; } } filp->f_pos += ext2_rec_len_from_disk(de->rec_len); } ext2_put_page(page); } return 0; } /* * ext2_find_entry() * * finds an entry in the specified directory with the wanted name. It * returns the page in which the entry was found (as a parameter - res_page), * and the entry itself. Page is returned mapped and unlocked. * Entry is guaranteed to be valid. 
*/ struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir, struct qstr *child, struct page ** res_page) { const char *name = child->name; int namelen = child->len; unsigned reclen = EXT2_DIR_REC_LEN(namelen); unsigned long start, n; unsigned long npages = dir_pages(dir); struct page *page = NULL; struct ext2_inode_info *ei = EXT2_I(dir); ext2_dirent * de; int dir_has_error = 0; if (npages == 0) goto out; /* OFFSET_CACHE */ *res_page = NULL; start = ei->i_dir_start_lookup; if (start >= npages) start = 0; n = start; do { char *kaddr; page = ext2_get_page(dir, n, dir_has_error); if (!IS_ERR(page)) { kaddr = page_address(page); de = (ext2_dirent *) kaddr; kaddr += ext2_last_byte(dir, n) - reclen; while ((char *) de <= kaddr) { if (de->rec_len == 0) { ext2_error(dir->i_sb, __func__, "zero-length directory entry"); ext2_put_page(page); goto out; } if (ext2_match (namelen, name, de)) goto found; de = ext2_next_entry(de); } ext2_put_page(page); } else dir_has_error = 1; if (++n >= npages) n = 0; /* next page is past the blocks we've got */ if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) { ext2_error(dir->i_sb, __func__, "dir %lu size %lld exceeds block count %llu", dir->i_ino, dir->i_size, (unsigned long long)dir->i_blocks); goto out; } } while (n != start); out: return NULL; found: *res_page = page; ei->i_dir_start_lookup = n; return de; } struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p) { struct page *page = ext2_get_page(dir, 0, 0); ext2_dirent *de = NULL; if (!IS_ERR(page)) { de = ext2_next_entry((ext2_dirent *) page_address(page)); *p = page; } return de; } ino_t ext2_inode_by_name(struct inode *dir, struct qstr *child) { ino_t res = 0; struct ext2_dir_entry_2 *de; struct page *page; de = ext2_find_entry (dir, child, &page); if (de) { res = le32_to_cpu(de->inode); ext2_put_page(page); } return res; } static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len) { return __block_write_begin(page, pos, len, 
ext2_get_block); } /* Releases the page */ void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de, struct page *page, struct inode *inode, int update_times) { loff_t pos = page_offset(page) + (char *) de - (char *) page_address(page); unsigned len = ext2_rec_len_from_disk(de->rec_len); int err; lock_page(page); err = ext2_prepare_chunk(page, pos, len); BUG_ON(err); de->inode = cpu_to_le32(inode->i_ino); ext2_set_de_type(de, inode); err = ext2_commit_chunk(page, pos, len); ext2_put_page(page); if (update_times) dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL; mark_inode_dirty(dir); } /* * Parent is locked. */ int ext2_add_link (struct dentry *dentry, struct inode *inode) { struct inode *dir = dentry->d_parent->d_inode; const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; unsigned chunk_size = ext2_chunk_size(dir); unsigned reclen = EXT2_DIR_REC_LEN(namelen); unsigned short rec_len, name_len; struct page *page = NULL; ext2_dirent * de; unsigned long npages = dir_pages(dir); unsigned long n; char *kaddr; loff_t pos; int err; /* * We take care of directory expansion in the same loop. * This code plays outside i_size, so it locks the page * to protect that region. 
*/ for (n = 0; n <= npages; n++) { char *dir_end; page = ext2_get_page(dir, n, 0); err = PTR_ERR(page); if (IS_ERR(page)) goto out; lock_page(page); kaddr = page_address(page); dir_end = kaddr + ext2_last_byte(dir, n); de = (ext2_dirent *)kaddr; kaddr += PAGE_CACHE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { /* We hit i_size */ name_len = 0; rec_len = chunk_size; de->rec_len = ext2_rec_len_to_disk(chunk_size); de->inode = 0; goto got_it; } if (de->rec_len == 0) { ext2_error(dir->i_sb, __func__, "zero-length directory entry"); err = -EIO; goto out_unlock; } err = -EEXIST; if (ext2_match (namelen, name, de)) goto out_unlock; name_len = EXT2_DIR_REC_LEN(de->name_len); rec_len = ext2_rec_len_from_disk(de->rec_len); if (!de->inode && rec_len >= reclen) goto got_it; if (rec_len >= name_len + reclen) goto got_it; de = (ext2_dirent *) ((char *) de + rec_len); } unlock_page(page); ext2_put_page(page); } BUG(); return -EINVAL; got_it: pos = page_offset(page) + (char*)de - (char*)page_address(page); err = ext2_prepare_chunk(page, pos, rec_len); if (err) goto out_unlock; if (de->inode) { ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len); de1->rec_len = ext2_rec_len_to_disk(rec_len - name_len); de->rec_len = ext2_rec_len_to_disk(name_len); de = de1; } de->name_len = namelen; memcpy(de->name, name, namelen); de->inode = cpu_to_le32(inode->i_ino); ext2_set_de_type (de, inode); err = ext2_commit_chunk(page, pos, rec_len); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL; mark_inode_dirty(dir); /* OFFSET_CACHE */ out_put: ext2_put_page(page); out: return err; out_unlock: unlock_page(page); goto out_put; } /* * ext2_delete_entry deletes a directory entry by merging it with the * previous entry. Page is up-to-date. Releases the page. 
*/ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page ) { struct inode *inode = page->mapping->host; char *kaddr = page_address(page); unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1); unsigned to = ((char *)dir - kaddr) + ext2_rec_len_from_disk(dir->rec_len); loff_t pos; ext2_dirent * pde = NULL; ext2_dirent * de = (ext2_dirent *) (kaddr + from); int err; while ((char*)de < (char*)dir) { if (de->rec_len == 0) { ext2_error(inode->i_sb, __func__, "zero-length directory entry"); err = -EIO; goto out; } pde = de; de = ext2_next_entry(de); } if (pde) from = (char*)pde - (char*)page_address(page); pos = page_offset(page) + from; lock_page(page); err = ext2_prepare_chunk(page, pos, to - from); BUG_ON(err); if (pde) pde->rec_len = ext2_rec_len_to_disk(to - from); dir->inode = 0; err = ext2_commit_chunk(page, pos, to - from); inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL; mark_inode_dirty(inode); out: ext2_put_page(page); return err; } /* * Set the first fragment of directory. 
*/ int ext2_make_empty(struct inode *inode, struct inode *parent) { struct page *page = grab_cache_page(inode->i_mapping, 0); unsigned chunk_size = ext2_chunk_size(inode); struct ext2_dir_entry_2 * de; int err; void *kaddr; if (!page) return -ENOMEM; err = ext2_prepare_chunk(page, 0, chunk_size); if (err) { unlock_page(page); goto fail; } kaddr = kmap_atomic(page, KM_USER0); memset(kaddr, 0, chunk_size); de = (struct ext2_dir_entry_2 *)kaddr; de->name_len = 1; de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1)); memcpy (de->name, ".\0\0", 4); de->inode = cpu_to_le32(inode->i_ino); ext2_set_de_type (de, inode); de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1)); de->name_len = 2; de->rec_len = ext2_rec_len_to_disk(chunk_size - EXT2_DIR_REC_LEN(1)); de->inode = cpu_to_le32(parent->i_ino); memcpy (de->name, "..\0", 4); ext2_set_de_type (de, inode); kunmap_atomic(kaddr, KM_USER0); err = ext2_commit_chunk(page, 0, chunk_size); fail: page_cache_release(page); return err; } /* * routine to check that the specified directory is empty (for rmdir) */ int ext2_empty_dir (struct inode * inode) { struct page *page = NULL; unsigned long i, npages = dir_pages(inode); int dir_has_error = 0; for (i = 0; i < npages; i++) { char *kaddr; ext2_dirent * de; page = ext2_get_page(inode, i, dir_has_error); if (IS_ERR(page)) { dir_has_error = 1; continue; } kaddr = page_address(page); de = (ext2_dirent *)kaddr; kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1); while ((char *)de <= kaddr) { if (de->rec_len == 0) { ext2_error(inode->i_sb, __func__, "zero-length directory entry"); printk("kaddr=%p, de=%p\n", kaddr, de); goto not_empty; } if (de->inode != 0) { /* check for . and .. 
*/ if (de->name[0] != '.') goto not_empty; if (de->name_len > 2) goto not_empty; if (de->name_len < 2) { if (de->inode != cpu_to_le32(inode->i_ino)) goto not_empty; } else if (de->name[1] != '.') goto not_empty; } de = ext2_next_entry(de); } ext2_put_page(page); } return 1; not_empty: ext2_put_page(page); return 0; } const struct file_operations ext2_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .readdir = ext2_readdir, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif .fsync = ext2_fsync, };
gpl-2.0
aloksinha2001/picuntu-3.0.8-alok
drivers/scsi/pm8001/pm8001_sas.c
3175
32940
/* * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* */ #include <linux/slab.h> #include "pm8001_sas.h" /** * pm8001_find_tag - from sas task to find out tag that belongs to this task * @task: the task sent to the LLDD * @tag: the found tag associated with the task */ static int pm8001_find_tag(struct sas_task *task, u32 *tag) { if (task->lldd_task) { struct pm8001_ccb_info *ccb; ccb = task->lldd_task; *tag = ccb->ccb_tag; return 1; } return 0; } /** * pm8001_tag_clear - clear the tags bitmap * @pm8001_ha: our hba struct * @tag: the found tag associated with the task */ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) { void *bitmap = pm8001_ha->tags; clear_bit(tag, bitmap); } static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { pm8001_tag_clear(pm8001_ha, tag); } static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag) { void *bitmap = pm8001_ha->tags; set_bit(tag, bitmap); } /** * pm8001_tag_alloc - allocate a empty tag for task used. * @pm8001_ha: our hba struct * @tag_out: the found empty tag . */ inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) { unsigned int index, tag; void *bitmap = pm8001_ha->tags; index = find_first_zero_bit(bitmap, pm8001_ha->tags_num); tag = index; if (tag >= pm8001_ha->tags_num) return -SAS_QUEUE_FULL; pm8001_tag_set(pm8001_ha, tag); *tag_out = tag; return 0; } void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) { int i; for (i = 0; i < pm8001_ha->tags_num; ++i) pm8001_tag_clear(pm8001_ha, i); } /** * pm8001_mem_alloc - allocate memory for pm8001. * @pdev: pci device. * @virt_addr: the allocated virtual address * @pphys_addr_hi: the physical address high byte address. * @pphys_addr_lo: the physical address low byte address. * @mem_size: memory size. 
*/ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align) { caddr_t mem_virt_alloc; dma_addr_t mem_dma_handle; u64 phys_align; u64 align_offset = 0; if (align) align_offset = (dma_addr_t)align - 1; mem_virt_alloc = pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle); if (!mem_virt_alloc) { pm8001_printk("memory allocation error\n"); return -1; } memset((void *)mem_virt_alloc, 0, mem_size+align); *pphys_addr = mem_dma_handle; phys_align = (*pphys_addr + align_offset) & ~align_offset; *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr; *pphys_addr_hi = upper_32_bits(phys_align); *pphys_addr_lo = lower_32_bits(phys_align); return 0; } /** * pm8001_find_ha_by_dev - from domain device which come from sas layer to * find out our hba struct. * @dev: the domain device which from sas layer. */ static struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev) { struct sas_ha_struct *sha = dev->port->ha; struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; return pm8001_ha; } /** * pm8001_phy_control - this function should be registered to * sas_domain_function_template to provide libsas used, note: this is just * control the HBA phy rather than other expander phy if you want control * other phy, you should use SMP command. * @sas_phy: which phy in HBA phys. * @func: the operation. * @funcdata: always NULL. 
*/ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { int rc = 0, phy_id = sas_phy->id; struct pm8001_hba_info *pm8001_ha = NULL; struct sas_phy_linkrates *rates; DECLARE_COMPLETION_ONSTACK(completion); pm8001_ha = sas_phy->ha->lldd_ha; pm8001_ha->phy[phy_id].enable_completion = &completion; switch (func) { case PHY_FUNC_SET_LINK_RATE: rates = funcdata; if (rates->minimum_linkrate) { pm8001_ha->phy[phy_id].minimum_linkrate = rates->minimum_linkrate; } if (rates->maximum_linkrate) { pm8001_ha->phy[phy_id].maximum_linkrate = rates->maximum_linkrate; } if (pm8001_ha->phy[phy_id].phy_state == 0) { PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_LINK_RESET); break; case PHY_FUNC_HARD_RESET: if (pm8001_ha->phy[phy_id].phy_state == 0) { PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_HARD_RESET); break; case PHY_FUNC_LINK_RESET: if (pm8001_ha->phy[phy_id].phy_state == 0) { PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id); wait_for_completion(&completion); } PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_LINK_RESET); break; case PHY_FUNC_RELEASE_SPINUP_HOLD: PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_LINK_RESET); break; case PHY_FUNC_DISABLE: PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); break; default: rc = -EOPNOTSUPP; } msleep(300); return rc; } int pm8001_slave_alloc(struct scsi_device *scsi_dev) { struct domain_device *dev = sdev_to_domain_dev(scsi_dev); if (dev_is_sata(dev)) { /* We don't need to rescan targets * if REPORT_LUNS request is failed */ if (scsi_dev->lun > 0) return -ENXIO; scsi_dev->tagged_supported = 1; } return sas_slave_alloc(scsi_dev); } /** * pm8001_scan_start - we should enable all HBA phys by sending the phy_start * command to HBA. * @shost: the scsi host data. 
*/ void pm8001_scan_start(struct Scsi_Host *shost) { int i; struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); for (i = 0; i < pm8001_ha->chip->n_phy; ++i) PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); } int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time) { /* give the phy enabling interrupt event time to come in (1s * is empirically about all it takes) */ if (time < HZ) return 0; /* Wait for discovery to finish */ scsi_flush_work(shost); return 1; } /** * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task * @pm8001_ha: our hba card information * @ccb: the ccb which attached to smp task */ static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb) { return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb); } u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag) { struct ata_queued_cmd *qc = task->uldd_task; if (qc) { if (qc->tf.command == ATA_CMD_FPDMA_WRITE || qc->tf.command == ATA_CMD_FPDMA_READ) { *tag = qc->tag; return 1; } } return 0; } /** * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task * @pm8001_ha: our hba card information * @ccb: the ccb which attached to sata task */ static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb) { return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb); } /** * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data * @pm8001_ha: our hba card information * @ccb: the ccb which attached to TM * @tmf: the task management IU */ static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) { return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf); } /** * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task * @pm8001_ha: our hba card information * @ccb: the ccb which attached to 
ssp task */ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb) { return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); } int pm8001_slave_configure(struct scsi_device *sdev) { struct domain_device *dev = sdev_to_domain_dev(sdev); int ret = sas_slave_configure(sdev); if (ret) return ret; if (dev_is_sata(dev)) { #ifdef PM8001_DISABLE_NCQ struct ata_port *ap = dev->sata_dev.ap; struct ata_device *adev = ap->link.device; adev->flags |= ATA_DFLAG_NCQ_OFF; scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); #endif } return 0; } /* Find the local port id that's attached to this device */ static int sas_find_local_port_id(struct domain_device *dev) { struct domain_device *pdev = dev->parent; /* Directly attached device */ if (!pdev) return dev->port->id; while (pdev) { struct domain_device *pdev_p = pdev->parent; if (!pdev_p) return pdev->port->id; pdev = pdev->parent; } return 0; } /** * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware. * @task: the task to be execute. * @num: if can_queue great than 1, the task can be queued up. for SMP task, * we always execute one one time. * @gfp_flags: gfp_flags. * @is_tmf: if it is task management task. 
* @tmf: the task management IU */ #define DEV_IS_GONE(pm8001_dev) \ ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) static int pm8001_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) { struct domain_device *dev = task->dev; struct pm8001_hba_info *pm8001_ha; struct pm8001_device *pm8001_dev; struct pm8001_port *port = NULL; struct sas_task *t = task; struct pm8001_ccb_info *ccb; u32 tag = 0xdeadbeef, rc, n_elem = 0; u32 n = num; unsigned long flags = 0, flags_libsas = 0; if (!dev->port) { struct task_status_struct *tsm = &t->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; if (dev->dev_type != SATA_DEV) t->task_done(t); return 0; } pm8001_ha = pm8001_find_ha_by_dev(task->dev); PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n ")); spin_lock_irqsave(&pm8001_ha->lock, flags); do { dev = t->dev; pm8001_dev = dev->lldd_dev; if (DEV_IS_GONE(pm8001_dev)) { if (pm8001_dev) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("device %d not ready.\n", pm8001_dev->device_id)); } else { PM8001_IO_DBG(pm8001_ha, pm8001_printk("device %016llx not " "ready.\n", SAS_ADDR(dev->sas_addr))); } rc = SAS_PHY_DOWN; goto out_done; } port = &pm8001_ha->port[sas_find_local_port_id(dev)]; if (!port->port_attached) { if (sas_protocol_ata(t->task_proto)) { struct task_status_struct *ts = &t->task_status; ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; spin_unlock_irqrestore(&pm8001_ha->lock, flags); spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags_libsas); t->task_done(t); spin_lock_irqsave(dev->sata_dev.ap->lock, flags_libsas); spin_lock_irqsave(&pm8001_ha->lock, flags); if (n > 1) t = list_entry(t->list.next, struct sas_task, list); continue; } else { struct task_status_struct *ts = &t->task_status; ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; t->task_done(t); if (n > 1) t = list_entry(t->list.next, struct sas_task, list); continue; } } rc = pm8001_tag_alloc(pm8001_ha, 
&tag); if (rc) goto err_out; ccb = &pm8001_ha->ccb_info[tag]; if (!sas_protocol_ata(t->task_proto)) { if (t->num_scatter) { n_elem = dma_map_sg(pm8001_ha->dev, t->scatter, t->num_scatter, t->data_dir); if (!n_elem) { rc = -ENOMEM; goto err_out_tag; } } } else { n_elem = t->num_scatter; } t->lldd_task = ccb; ccb->n_elem = n_elem; ccb->ccb_tag = tag; ccb->task = t; switch (t->task_proto) { case SAS_PROTOCOL_SMP: rc = pm8001_task_prep_smp(pm8001_ha, ccb); break; case SAS_PROTOCOL_SSP: if (is_tmf) rc = pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf); else rc = pm8001_task_prep_ssp(pm8001_ha, ccb); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: rc = pm8001_task_prep_ata(pm8001_ha, ccb); break; default: dev_printk(KERN_ERR, pm8001_ha->dev, "unknown sas_task proto: 0x%x\n", t->task_proto); rc = -EINVAL; break; } if (rc) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("rc is %x\n", rc)); goto err_out_tag; } /* TODO: select normal or high priority */ spin_lock(&t->task_state_lock); t->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock(&t->task_state_lock); pm8001_dev->running_req++; if (n > 1) t = list_entry(t->list.next, struct sas_task, list); } while (--n); rc = 0; goto out_done; err_out_tag: pm8001_tag_free(pm8001_ha, tag); err_out: dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc); if (!sas_protocol_ata(t->task_proto)) if (n_elem) dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem, t->data_dir); out_done: spin_unlock_irqrestore(&pm8001_ha->lock, flags); return rc; } /** * pm8001_queue_command - register for upper layer used, all IO commands sent * to HBA are from this interface. * @task: the task to be execute. * @num: if can_queue great than 1, the task can be queued up. 
for SMP task, * we always execute one one time * @gfp_flags: gfp_flags */ int pm8001_queue_command(struct sas_task *task, const int num, gfp_t gfp_flags) { return pm8001_task_exec(task, num, gfp_flags, 0, NULL); } void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx) { pm8001_tag_clear(pm8001_ha, ccb_idx); } /** * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb. * @pm8001_ha: our hba card information * @ccb: the ccb which attached to ssp task * @task: the task to be free. * @ccb_idx: ccb index. */ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx) { if (!ccb->task) return; if (!sas_protocol_ata(task->task_proto)) if (ccb->n_elem) dma_unmap_sg(pm8001_ha->dev, task->scatter, task->num_scatter, task->data_dir); switch (task->task_proto) { case SAS_PROTOCOL_SMP: dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); break; case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SSP: default: /* do nothing */ break; } task->lldd_task = NULL; ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; pm8001_ccb_free(pm8001_ha, ccb_idx); } /** * pm8001_alloc_dev - find a empty pm8001_device * @pm8001_ha: our hba card information */ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { pm8001_ha->devices[dev].id = dev; return &pm8001_ha->devices[dev]; } } if (dev == PM8001_MAX_DEVICES) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("max support %d devices, ignore ..\n", PM8001_MAX_DEVICES)); } return NULL; } static void pm8001_free_dev(struct pm8001_device *pm8001_dev) { u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); pm8001_dev->id = id; pm8001_dev->dev_type = NO_DEVICE; pm8001_dev->device_id = PM8001_MAX_DEVICES; 
pm8001_dev->sas_device = NULL; } /** * pm8001_dev_found_notify - libsas notify a device is found. * @dev: the device structure which sas layer used. * * when libsas find a sas domain device, it should tell the LLDD that * device is found, and then LLDD register this device to HBA firmware * by the command "OPC_INB_REG_DEV", after that the HBA will assign a * device ID(according to device's sas address) and returned it to LLDD. From * now on, we communicate with HBA FW with the device ID which HBA assigned * rather than sas address. it is the necessary step for our HBA but it is * the optional for other HBA driver. */ static int pm8001_dev_found_notify(struct domain_device *dev) { unsigned long flags = 0; int res = 0; struct pm8001_hba_info *pm8001_ha = NULL; struct domain_device *parent_dev = dev->parent; struct pm8001_device *pm8001_device; DECLARE_COMPLETION_ONSTACK(completion); u32 flag = 0; pm8001_ha = pm8001_find_ha_by_dev(dev); spin_lock_irqsave(&pm8001_ha->lock, flags); pm8001_device = pm8001_alloc_dev(pm8001_ha); if (!pm8001_device) { res = -1; goto found_out; } pm8001_device->sas_device = dev; dev->lldd_dev = pm8001_device; pm8001_device->dev_type = dev->dev_type; pm8001_device->dcompletion = &completion; if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { int phy_id; struct ex_phy *phy; for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys; phy_id++) { phy = &parent_dev->ex_dev.ex_phy[phy_id]; if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(dev->sas_addr)) { pm8001_device->attached_phy = phy_id; break; } } if (phy_id == parent_dev->ex_dev.num_phys) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Error: no attached dev:%016llx" " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(parent_dev->sas_addr))); res = -1; } } else { if (dev->dev_type == SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; flag = 1; /* directly sata*/ } } /*register this device to HBA*/ PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device \n")); 
PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); spin_unlock_irqrestore(&pm8001_ha->lock, flags); wait_for_completion(&completion); if (dev->dev_type == SAS_END_DEV) msleep(50); pm8001_ha->flags |= PM8001F_RUN_TIME ; return 0; found_out: spin_unlock_irqrestore(&pm8001_ha->lock, flags); return res; } int pm8001_dev_found(struct domain_device *dev) { return pm8001_dev_found_notify(dev); } /** * pm8001_alloc_task - allocate a task structure for TMF */ static struct sas_task *pm8001_alloc_task(void) { struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL); if (task) { INIT_LIST_HEAD(&task->list); spin_lock_init(&task->task_state_lock); task->task_state_flags = SAS_TASK_STATE_PENDING; init_timer(&task->timer); init_completion(&task->completion); } return task; } static void pm8001_free_task(struct sas_task *task) { if (task) { BUG_ON(!list_empty(&task->list)); kfree(task); } } static void pm8001_task_done(struct sas_task *task) { if (!del_timer(&task->timer)) return; complete(&task->completion); } static void pm8001_tmf_timedout(unsigned long data) { struct sas_task *task = (struct sas_task *)data; task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->completion); } #define PM8001_TASK_TIMEOUT 20 /** * pm8001_exec_internal_tmf_task - execute some task management commands. * @dev: the wanted device. * @tmf: which task management wanted to be take. * @para_len: para_len. * @parameter: ssp task parameter. * * when errors or exception happened, we may want to do something, for example * abort the issued task which result in this execption, it is done by calling * this function, note it is also with the task execute interface. 
*/ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, void *parameter, u32 para_len, struct pm8001_tmf_task *tmf) { int res, retry; struct sas_task *task = NULL; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); for (retry = 0; retry < 3; retry++) { task = pm8001_alloc_task(); if (!task) return -ENOMEM; task->dev = dev; task->task_proto = dev->tproto; memcpy(&task->ssp_task, parameter, para_len); task->task_done = pm8001_task_done; task->timer.data = (unsigned long)task; task->timer.function = pm8001_tmf_timedout; task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; add_timer(&task->timer); res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); if (res) { del_timer(&task->timer); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing internal task " "failed\n")); goto ex_err; } wait_for_completion(&task->completion); res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("TMF task[%x]timeout.\n", tmf->tmf)); goto ex_err; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = TMF_RESP_FUNC_COMPLETE; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_UNDERRUN) { /* no error, but return the number of bytes of * underrun */ res = task->task_status.residual; break; } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAS_DATA_OVERRUN) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Blocked task error.\n")); res = -EMSGSIZE; break; } else { PM8001_EH_DBG(pm8001_ha, pm8001_printk(" Task to dev %016llx response:" "0x%x status 0x%x\n", SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat)); pm8001_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); if (task != NULL) pm8001_free_task(task); return res; } static int 
pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag, u32 task_tag) { int res, retry; u32 ccb_tag; struct pm8001_ccb_info *ccb; struct sas_task *task = NULL; for (retry = 0; retry < 3; retry++) { task = pm8001_alloc_task(); if (!task) return -ENOMEM; task->dev = dev; task->task_proto = dev->tproto; task->task_done = pm8001_task_done; task->timer.data = (unsigned long)task; task->timer.function = pm8001_tmf_timedout; task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ; add_timer(&task->timer); res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); if (res) return res; ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; res = PM8001_CHIP_DISP->task_abort(pm8001_ha, pm8001_dev, flag, task_tag, ccb_tag); if (res) { del_timer(&task->timer); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing internal task " "failed\n")); goto ex_err; } wait_for_completion(&task->completion); res = TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("TMF task timeout.\n")); goto ex_err; } } if (task->task_status.resp == SAS_TASK_COMPLETE && task->task_status.stat == SAM_STAT_GOOD) { res = TMF_RESP_FUNC_COMPLETE; break; } else { PM8001_EH_DBG(pm8001_ha, pm8001_printk(" Task to dev %016llx response: " "0x%x status 0x%x\n", SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat)); pm8001_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); if (task != NULL) pm8001_free_task(task); return res; } /** * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify" * @dev: the device structure which sas layer used. 
*/ static void pm8001_dev_gone_notify(struct domain_device *dev) { unsigned long flags = 0; u32 tag; struct pm8001_hba_info *pm8001_ha; struct pm8001_device *pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); spin_lock_irqsave(&pm8001_ha->lock, flags); pm8001_tag_alloc(pm8001_ha, &tag); if (pm8001_dev) { u32 device_id = pm8001_dev->device_id; PM8001_DISC_DBG(pm8001_ha, pm8001_printk("found dev[%d:%x] is gone.\n", pm8001_dev->device_id, pm8001_dev->dev_type)); if (pm8001_dev->running_req) { spin_unlock_irqrestore(&pm8001_ha->lock, flags); pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); spin_lock_irqsave(&pm8001_ha->lock, flags); } PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); pm8001_free_dev(pm8001_dev); } else { PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found dev has gone.\n")); } dev->lldd_dev = NULL; spin_unlock_irqrestore(&pm8001_ha->lock, flags); } void pm8001_dev_gone(struct domain_device *dev) { pm8001_dev_gone_notify(dev); } static int pm8001_issue_ssp_tmf(struct domain_device *dev, u8 *lun, struct pm8001_tmf_task *tmf) { struct sas_ssp_task ssp_task; if (!(dev->tproto & SAS_PROTOCOL_SSP)) return TMF_RESP_FUNC_ESUPP; strncpy((u8 *)&ssp_task.LUN, lun, 8); return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task), tmf); } /** * Standard mandates link reset for ATA (type 0) and hard reset for * SSP (type 1) , only for RECOVERY */ int pm8001_I_T_nexus_reset(struct domain_device *dev) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_device *pm8001_dev; struct pm8001_hba_info *pm8001_ha; struct sas_phy *phy; if (!dev || !dev->lldd_dev) return -1; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); phy = sas_find_local_phy(dev); if (dev_is_sata(dev)) { DECLARE_COMPLETION_ONSTACK(completion_setstate); if (scsi_is_sas_phy_local(phy)) return 0; rc = sas_phy_reset(phy, 1); msleep(2000); rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); pm8001_dev->setds_completion = 
&completion_setstate; rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, pm8001_dev, 0x01); wait_for_completion(&completion_setstate); } else{ rc = sas_phy_reset(phy, 1); msleep(2000); } PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", pm8001_dev->device_id, rc)); return rc; } /* mandatory SAM-3, the task reset the specified LUN*/ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); if (dev_is_sata(dev)) { struct sas_phy *phy = sas_find_local_phy(dev); rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , dev, 1, 0); rc = sas_phy_reset(phy, 1); rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, pm8001_dev, 0x01); msleep(2000); } else { tmf_task.tmf = TMF_LU_RESET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); } /* If failed, fall-through I_T_Nexus reset */ PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n", pm8001_dev->device_id, rc)); return rc; } /* optional SAM-3 */ int pm8001_query_task(struct sas_task *task) { u32 tag = 0xdeadbeef; int i = 0; struct scsi_lun lun; struct pm8001_tmf_task tmf_task; int rc = TMF_RESP_FUNC_FAILED; if (unlikely(!task || !task->lldd_task || !task->dev)) return rc; if (task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd *cmnd = task->uldd_task; struct domain_device *dev = task->dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); int_to_scsilun(cmnd->device->lun, &lun); rc = pm8001_find_tag(task, &tag); if (rc == 0) { rc = TMF_RESP_FUNC_FAILED; return rc; } PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:[")); for (i = 0; i < 16; i++) printk(KERN_INFO "%02x ", cmnd->cmnd[i]); printk(KERN_INFO "]\n"); tmf_task.tmf = TMF_QUERY_TASK; tmf_task.tag_of_task_to_be_managed = tag; rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); switch (rc) { /* The task is still in Lun, release it then */ case 
TMF_RESP_FUNC_SUCC: PM8001_EH_DBG(pm8001_ha, pm8001_printk("The task is still in Lun \n")); /* The task is not in Lun or failed, reset the phy */ case TMF_RESP_FUNC_FAILED: case TMF_RESP_FUNC_COMPLETE: PM8001_EH_DBG(pm8001_ha, pm8001_printk("The task is not in Lun or failed," " reset the phy \n")); break; } } pm8001_printk(":rc= %d\n", rc); return rc; } /* mandatory SAM-3, still need free task/ccb info, abord the specified task */ int pm8001_abort_task(struct sas_task *task) { unsigned long flags; u32 tag = 0xdeadbeef; u32 device_id; struct domain_device *dev ; struct pm8001_hba_info *pm8001_ha = NULL; struct pm8001_ccb_info *ccb; struct scsi_lun lun; struct pm8001_device *pm8001_dev; struct pm8001_tmf_task tmf_task; int rc = TMF_RESP_FUNC_FAILED; if (unlikely(!task || !task->lldd_task || !task->dev)) return rc; spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; goto out; } spin_unlock_irqrestore(&task->task_state_lock, flags); if (task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd *cmnd = task->uldd_task; dev = task->dev; ccb = task->lldd_task; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); int_to_scsilun(cmnd->device->lun, &lun); rc = pm8001_find_tag(task, &tag); if (rc == 0) { printk(KERN_INFO "No such tag in %s\n", __func__); rc = TMF_RESP_FUNC_FAILED; return rc; } device_id = pm8001_dev->device_id; PM8001_EH_DBG(pm8001_ha, pm8001_printk("abort io to deviceid= %d\n", device_id)); tmf_task.tmf = TMF_ABORT_TASK; tmf_task.tag_of_task_to_be_managed = tag; rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, pm8001_dev->sas_device, 0, tag); } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { dev = task->dev; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); rc = pm8001_find_tag(task, &tag); if 
(rc == 0) { printk(KERN_INFO "No such tag in %s\n", __func__); rc = TMF_RESP_FUNC_FAILED; return rc; } rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, pm8001_dev->sas_device, 0, tag); } else if (task->task_proto & SAS_PROTOCOL_SMP) { /* SMP */ dev = task->dev; pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); rc = pm8001_find_tag(task, &tag); if (rc == 0) { printk(KERN_INFO "No such tag in %s\n", __func__); rc = TMF_RESP_FUNC_FAILED; return rc; } rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, pm8001_dev->sas_device, 0, tag); } out: if (rc != TMF_RESP_FUNC_COMPLETE) pm8001_printk("rc= %d\n", rc); return rc; } int pm8001_abort_task_set(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; tmf_task.tmf = TMF_ABORT_TASK_SET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); return rc; } int pm8001_clear_aca(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; tmf_task.tmf = TMF_CLEAR_ACA; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); return rc; } int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) { int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); PM8001_EH_DBG(pm8001_ha, pm8001_printk("I_T_L_Q clear task set[%x]\n", pm8001_dev->device_id)); tmf_task.tmf = TMF_CLEAR_TASK_SET; rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); return rc; }
gpl-2.0
jxxhwy/Thunder-Kenel-JB-N719
drivers/spi/spi_txx9.c
3687
12273
/*
 * spi_txx9.c - TXx9 SPI controller driver.
 *
 * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
 * Copyright (C) 2000-2001 Toshiba Corporation
 *
 * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
 * terms of the GNU General Public License version 2. This program is
 * licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
 *
 * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp)
 */
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/gpio.h>


#define SPI_FIFO_SIZE 4
#define SPI_MAX_DIVIDER 0xff	/* Max. value for SPCR1.SER */
#define SPI_MIN_DIVIDER 1	/* Min. value for SPCR1.SER */

/* Register offsets from the controller base. */
#define TXx9_SPMCR		0x00
#define TXx9_SPCR0		0x04
#define TXx9_SPCR1		0x08
#define TXx9_SPFS		0x0c
#define TXx9_SPSR		0x14
#define TXx9_SPDR		0x18

/* SPMCR : SPI Master Control */
#define TXx9_SPMCR_OPMODE	0xc0	/* operating-mode field (CONFIG/ACTIVE) */
#define TXx9_SPMCR_CONFIG	0x40
#define TXx9_SPMCR_ACTIVE	0x80
#define TXx9_SPMCR_SPSTP	0x02
#define TXx9_SPMCR_BCLR	0x01

/* SPCR0 : SPI Control 0 */
#define TXx9_SPCR0_TXIFL_MASK	0xc000
#define TXx9_SPCR0_RXIFL_MASK	0x3000	/* RX FIFO interrupt threshold */
#define TXx9_SPCR0_SIDIE	0x0800
#define TXx9_SPCR0_SOEIE	0x0400
#define TXx9_SPCR0_RBSIE	0x0200	/* RX buffer status interrupt enable */
#define TXx9_SPCR0_TBSIE	0x0100
#define TXx9_SPCR0_IFSPSE	0x0010
#define TXx9_SPCR0_SBOS	0x0004
#define TXx9_SPCR0_SPHA	0x0002
#define TXx9_SPCR0_SPOL	0x0001

/* SPSR : SPI Status */
#define TXx9_SPSR_TBSI	0x8000
#define TXx9_SPSR_RBSI	0x4000	/* RX buffer status interrupt flag */
#define TXx9_SPSR_TBS_MASK	0x3800
#define TXx9_SPSR_RBS_MASK	0x0700
#define TXx9_SPSR_SPOE	0x0080
#define TXx9_SPSR_IFSD	0x0008
#define TXx9_SPSR_SIDLE	0x0004	/* shifter idle */
#define TXx9_SPSR_STRDY	0x0002
#define TXx9_SPSR_SRRDY	0x0001


/* Per-controller driver state, stored in the spi_master's devdata. */
struct txx9spi {
	struct workqueue_struct	*workqueue;	/* single-threaded message pump */
	struct work_struct work;
	spinlock_t lock;	/* protect 'queue' */
	struct list_head queue;	/* pending spi_message list */
	wait_queue_head_t waitq;	/* woken by the RX-done interrupt */
	void __iomem *membase;
	int baseclk;	/* input clock rate (Hz) from "spi-baseclk" */
	struct clk *clk;
	u32 max_speed_hz, min_speed_hz;	/* derived from baseclk and divider range */
	int last_chipselect;	/* GPIO of the CS left asserted, or -1 */
	int last_chipselect_val;	/* level it was driven to */
};

/* Read a 32-bit controller register. */
static u32 txx9spi_rd(struct txx9spi *c, int reg)
{
	return __raw_readl(c->membase + reg);
}

/* Write a 32-bit controller register. */
static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
{
	__raw_writel(val, c->membase + reg);
}

/*
 * Drive the chipselect GPIO for @spi. Chipselects are plain GPIO lines,
 * numbered by spi->chip_select. @on selects assert/deassert; the actual
 * level depends on SPI_CS_HIGH. @cs_delay is the setup/hold/recovery
 * time in nanoseconds.
 */
static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
		int on, unsigned int cs_delay)
{
	int val = (spi->mode & SPI_CS_HIGH) ? on : !on;

	if (on) {
		/* deselect the chip with cs_change hint in last transfer */
		if (c->last_chipselect >= 0)
			gpio_set_value(c->last_chipselect,
					!c->last_chipselect_val);
		c->last_chipselect = spi->chip_select;
		c->last_chipselect_val = val;
	} else {
		c->last_chipselect = -1;
		ndelay(cs_delay);	/* CS Hold Time */
	}
	gpio_set_value(spi->chip_select, val);
	ndelay(cs_delay);	/* CS Setup Time / CS Recovery Time */
}

/*
 * spi_master->setup: validate the device's speed and word size, claim
 * its chipselect GPIO as an output, and leave the chip deselected.
 * Returns 0 or -EINVAL.
 */
static int txx9spi_setup(struct spi_device *spi)
{
	struct txx9spi *c = spi_master_get_devdata(spi->master);
	u8 bits_per_word;

	if (!spi->max_speed_hz
			|| spi->max_speed_hz > c->max_speed_hz
			|| spi->max_speed_hz < c->min_speed_hz)
		return -EINVAL;

	bits_per_word = spi->bits_per_word;
	if (bits_per_word != 8 && bits_per_word != 16)
		return -EINVAL;

	if (gpio_direction_output(spi->chip_select,
			!(spi->mode & SPI_CS_HIGH))) {
		dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
		return -EINVAL;
	}

	/* deselect chip */
	spin_lock(&c->lock);
	txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
	spin_unlock(&c->lock);

	return 0;
}

/*
 * IRQ handler: the controller raised the RX-buffer-status interrupt.
 * Mask it again and wake the worker waiting in txx9spi_work_one().
 */
static irqreturn_t txx9spi_interrupt(int irq, void *dev_id)
{
	struct txx9spi *c = dev_id;

	/* disable rx intr */
	txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE,
			TXx9_SPCR0);
	wake_up(&c->waitq);
	return IRQ_HANDLED;
}

/*
 * Execute one spi_message synchronously on the hardware: program mode
 * and clock divider per transfer, feed the 4-word FIFO, sleep until the
 * RX interrupt reports the words back, and honour cs_change/delay_usecs.
 * Completes the message (m->status, m->complete) before returning.
 */
static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
{
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	unsigned int cs_delay;
	unsigned int cs_change = 1;
	int status = 0;
	u32 mcr;
	u32 prev_speed_hz = 0;
	u8 prev_bits_per_word = 0;

	/* CS setup/hold/recovery time in nsec */
	cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz;

	mcr = txx9spi_rd(c, TXx9_SPMCR);
	if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) {
		dev_err(&spi->dev, "Bad mode.\n");
		status = -EIO;
		goto exit;
	}
	mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);

	/* enter config mode */
	txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
	/*
	 * NOTE(review): the bare 0x08 OR-ed into SPCR0 below is not among
	 * the named TXx9_SPCR0_* bits here — confirm against the TXx9
	 * databook before touching it.
	 */
	txx9spi_wr(c, TXx9_SPCR0_SBOS
			| ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0)
			| ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0)
			| 0x08,
			TXx9_SPCR0);

	list_for_each_entry (t, &m->transfers, transfer_list) {
		const void *txbuf = t->tx_buf;
		void *rxbuf = t->rx_buf;
		u32 data;
		unsigned int len = t->len;
		unsigned int wsize;
		u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
		u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;

		bits_per_word = bits_per_word ? : 8;
		wsize = bits_per_word >> 3; /* in bytes */

		/* reprogram divider/word-size only when they change */
		if (prev_speed_hz != speed_hz
				|| prev_bits_per_word != bits_per_word) {
			int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1;
			n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER);
			/* enter config mode */
			txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
					TXx9_SPMCR);
			txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1);
			/* enter active mode */
			txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR);

			prev_speed_hz = speed_hz;
			prev_bits_per_word = bits_per_word;
		}

		if (cs_change)
			txx9spi_cs_func(spi, c, 1, cs_delay);
		cs_change = t->cs_change;
		while (len) {
			/* move up to one FIFO's worth per iteration */
			unsigned int count = SPI_FIFO_SIZE;
			int i;
			u32 cr0;

			if (len < count * wsize)
				count = len / wsize;
			/* now tx must be idle... */
			while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE))
				cpu_relax();
			cr0 = txx9spi_rd(c, TXx9_SPCR0);
			cr0 &= ~TXx9_SPCR0_RXIFL_MASK;
			cr0 |= (count - 1) << 12;	/* interrupt after 'count' words */
			/* enable rx intr */
			cr0 |= TXx9_SPCR0_RBSIE;
			txx9spi_wr(c, cr0, TXx9_SPCR0);
			/* send */
			for (i = 0; i < count; i++) {
				if (txbuf) {
					data = (wsize == 1)
						? *(const u8 *)txbuf
						: *(const u16 *)txbuf;
					txx9spi_wr(c, data, TXx9_SPDR);
					txbuf += wsize;
				} else
					txx9spi_wr(c, 0, TXx9_SPDR);
			}
			/* wait all rx data */
			wait_event(c->waitq,
				   txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI);
			/* receive */
			for (i = 0; i < count; i++) {
				data = txx9spi_rd(c, TXx9_SPDR);
				if (rxbuf) {
					if (wsize == 1)
						*(u8 *)rxbuf = data;
					else
						*(u16 *)rxbuf = data;
					rxbuf += wsize;
				}
			}
			len -= count * wsize;
		}
		m->actual_length += t->len;
		if (t->delay_usecs)
			udelay(t->delay_usecs);

		if (!cs_change)
			continue;
		if (t->transfer_list.next == &m->transfers)
			break;
		/* sometimes a short mid-message deselect of the chip
		 * may be needed to terminate a mode or command
		 */
		txx9spi_cs_func(spi, c, 0, cs_delay);
	}

exit:
	m->status = status;
	m->complete(m->context);

	/* normally deactivate chipselect ... unless no error and
	 * cs_change has hinted that the next message will probably
	 * be for this chip too.
	 */
	if (!(status == 0 && cs_change))
		txx9spi_cs_func(spi, c, 0, cs_delay);

	/* enter config mode */
	txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
}

/*
 * Workqueue handler: drain the message queue, running each message with
 * the lock dropped so txx9spi_transfer() can keep appending.
 */
static void txx9spi_work(struct work_struct *work)
{
	struct txx9spi *c = container_of(work, struct txx9spi, work);
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	while (!list_empty(&c->queue)) {
		struct spi_message *m;

		m = container_of(c->queue.next, struct spi_message, queue);
		list_del_init(&m->queue);
		spin_unlock_irqrestore(&c->lock, flags);

		txx9spi_work_one(c, m);

		spin_lock_irqsave(&c->lock, flags);
	}
	spin_unlock_irqrestore(&c->lock, flags);
}

/*
 * spi_master->transfer: validate every transfer in @m (buffers, word
 * size, length alignment, speed range), then queue the message for the
 * workqueue. Returns 0 or -EINVAL; completion is asynchronous.
 */
static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct spi_master *master = spi->master;
	struct txx9spi *c = spi_master_get_devdata(master);
	struct spi_transfer *t;
	unsigned long flags;

	m->actual_length = 0;

	/* check each transfer's parameters */
	list_for_each_entry (t, &m->transfers, transfer_list) {
		u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
		u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;

		bits_per_word = bits_per_word ? : 8;
		if (!t->tx_buf && !t->rx_buf && t->len)
			return -EINVAL;
		if (bits_per_word != 8 && bits_per_word != 16)
			return -EINVAL;
		/* len must be a whole number of words */
		if (t->len & ((bits_per_word >> 3) - 1))
			return -EINVAL;
		if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
			return -EINVAL;
	}

	spin_lock_irqsave(&c->lock, flags);
	list_add_tail(&m->queue, &c->queue);
	queue_work(c->workqueue, &c->work);
	spin_unlock_irqrestore(&c->lock, flags);

	return 0;
}

/*
 * Probe: allocate the master, acquire the "spi-baseclk" clock, map the
 * register window, put the controller into config mode, hook the IRQ,
 * create the message workqueue, and register with the SPI core.
 * Uses devm_* for the MMIO/IRQ resources; clock and workqueue are torn
 * down manually on the error path.
 */
static int __init txx9spi_probe(struct platform_device *dev)
{
	struct spi_master *master;
	struct txx9spi *c;
	struct resource *res;
	int ret = -ENODEV;
	u32 mcr;
	int irq;

	master = spi_alloc_master(&dev->dev, sizeof(*c));
	if (!master)
		return ret;
	c = spi_master_get_devdata(master);
	platform_set_drvdata(dev, master);

	INIT_WORK(&c->work, txx9spi_work);
	spin_lock_init(&c->lock);
	INIT_LIST_HEAD(&c->queue);
	init_waitqueue_head(&c->waitq);

	c->clk = clk_get(&dev->dev, "spi-baseclk");
	if (IS_ERR(c->clk)) {
		ret = PTR_ERR(c->clk);
		c->clk = NULL;
		goto exit;
	}
	ret = clk_enable(c->clk);
	if (ret) {
		clk_put(c->clk);
		c->clk = NULL;
		goto exit;
	}
	c->baseclk = clk_get_rate(c->clk);
	/* speed limits follow from the SPCR1.SER divider range */
	c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
	c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res)
		goto exit_busy;
	if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
				     "spi_txx9"))
		goto exit_busy;
	c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
	if (!c->membase)
		goto exit_busy;

	/* enter config mode */
	mcr = txx9spi_rd(c, TXx9_SPMCR);
	mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
	txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);

	irq = platform_get_irq(dev, 0);
	if (irq < 0)
		goto exit_busy;
	ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0,
			       "spi_txx9", c);
	if (ret)
		goto exit;
	c->workqueue = create_singlethread_workqueue(
				dev_name(master->dev.parent));
	if (!c->workqueue)
		goto exit_busy;
	c->last_chipselect = -1;

	dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
		 (unsigned long long)res->start, irq,
		 (c->baseclk + 500000) / 1000000);

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;

	master->bus_num = dev->id;
	master->setup = txx9spi_setup;
	master->transfer = txx9spi_transfer;
	master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */

	ret = spi_register_master(master);
	if (ret)
		goto exit;
	return 0;
exit_busy:
	ret = -EBUSY;
exit:
	if (c->workqueue)
		destroy_workqueue(c->workqueue);
	if (c->clk) {
		clk_disable(c->clk);
		clk_put(c->clk);
	}
	platform_set_drvdata(dev, NULL);
	spi_master_put(master);
	return ret;
}

/* Remove: unregister from the SPI core and undo probe's manual setup. */
static int __exit txx9spi_remove(struct platform_device *dev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
	struct txx9spi *c = spi_master_get_devdata(master);

	spi_unregister_master(master);
	platform_set_drvdata(dev, NULL);
	destroy_workqueue(c->workqueue);
	clk_disable(c->clk);
	clk_put(c->clk);
	spi_master_put(master);
	return 0;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:spi_txx9");

static struct platform_driver txx9spi_driver = {
	.remove = __exit_p(txx9spi_remove),
	.driver = {
		.name = "spi_txx9",
		.owner = THIS_MODULE,
	},
};

static int __init txx9spi_init(void)
{
	return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
}
subsys_initcall(txx9spi_init);

static void __exit txx9spi_exit(void)
{
	platform_driver_unregister(&txx9spi_driver);
}
module_exit(txx9spi_exit);

MODULE_DESCRIPTION("TXx9 SPI Driver");
MODULE_LICENSE("GPL");
gpl-2.0
vivilyu/android_kernel_huawei_c8813
arch/m68k/mm/memory.c
4455
7769
/* * linux/arch/m68k/mm/memory.c * * Copyright (C) 1995 Hamish Macdonald */ #include <linux/module.h> #include <linux/mm.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/gfp.h> #include <asm/setup.h> #include <asm/segment.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/traps.h> #include <asm/machdep.h> /* ++andreas: {get,free}_pointer_table rewritten to use unused fields from struct page instead of separately kmalloced struct. Stolen from arch/sparc/mm/srmmu.c ... */ typedef struct list_head ptable_desc; static LIST_HEAD(ptable_list); #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) #define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index) #define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t)) void __init init_pointer_table(unsigned long ptable) { ptable_desc *dp; unsigned long page = ptable & PAGE_MASK; unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE); dp = PD_PTABLE(page); if (!(PD_MARKBITS(dp) & mask)) { PD_MARKBITS(dp) = 0xff; list_add(dp, &ptable_list); } PD_MARKBITS(dp) &= ~mask; #ifdef DEBUG printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp)); #endif /* unreserve the page so it's possible to free that page */ PD_PAGE(dp)->flags &= ~(1 << PG_reserved); init_page_count(PD_PAGE(dp)); return; } pmd_t *get_pointer_table (void) { ptable_desc *dp = ptable_list.next; unsigned char mask = PD_MARKBITS (dp); unsigned char tmp; unsigned int off; /* * For a pointer table for a user process address space, a * table is taken from a page allocated for the purpose. Each * page can hold 8 pointer tables. The page is remapped in * virtual address space to be noncacheable. 
*/ if (mask == 0) { void *page; ptable_desc *new; if (!(page = (void *)get_zeroed_page(GFP_KERNEL))) return NULL; flush_tlb_kernel_page(page); nocache_page(page); new = PD_PTABLE(page); PD_MARKBITS(new) = 0xfe; list_add_tail(new, dp); return (pmd_t *)page; } for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE) ; PD_MARKBITS(dp) = mask & ~tmp; if (!PD_MARKBITS(dp)) { /* move to end of list */ list_move_tail(dp, &ptable_list); } return (pmd_t *) (page_address(PD_PAGE(dp)) + off); } int free_pointer_table (pmd_t *ptable) { ptable_desc *dp; unsigned long page = (unsigned long)ptable & PAGE_MASK; unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE); dp = PD_PTABLE(page); if (PD_MARKBITS (dp) & mask) panic ("table already free!"); PD_MARKBITS (dp) |= mask; if (PD_MARKBITS(dp) == 0xff) { /* all tables in page are free, free page */ list_del(dp); cache_page((void *)page); free_page (page); return 1; } else if (ptable_list.next != dp) { /* * move this descriptor to the front of the list, since * it has one or more free tables. 
*/ list_move(dp, &ptable_list); } return 0; } /* invalidate page in both caches */ static inline void clear040(unsigned long paddr) { asm volatile ( "nop\n\t" ".chip 68040\n\t" "cinvp %%bc,(%0)\n\t" ".chip 68k" : : "a" (paddr)); } /* invalidate page in i-cache */ static inline void cleari040(unsigned long paddr) { asm volatile ( "nop\n\t" ".chip 68040\n\t" "cinvp %%ic,(%0)\n\t" ".chip 68k" : : "a" (paddr)); } /* push page in both caches */ /* RZ: cpush %bc DOES invalidate %ic, regardless of DPI */ static inline void push040(unsigned long paddr) { asm volatile ( "nop\n\t" ".chip 68040\n\t" "cpushp %%bc,(%0)\n\t" ".chip 68k" : : "a" (paddr)); } /* push and invalidate page in both caches, must disable ints * to avoid invalidating valid data */ static inline void pushcl040(unsigned long paddr) { unsigned long flags; local_irq_save(flags); push040(paddr); if (CPU_IS_060) clear040(paddr); local_irq_restore(flags); } /* * 040: Hit every page containing an address in the range paddr..paddr+len-1. * (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s). * Hit every page until there is a page or less to go. Hit the next page, * and the one after that if the range hits it. */ /* ++roman: A little bit more care is required here: The CINVP instruction * invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning * and the end of the region must be treated differently if they are not * exactly at the beginning or end of a page boundary. Else, maybe too much * data becomes invalidated and thus lost forever. CPUSHP does what we need: * it invalidates the page after pushing dirty data to memory. (Thanks to Jes * for discovering the problem!) */ /* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set * the DPI bit in the CACR; would it cause problems with temporarily changing * this?). So we have to push first and then additionally to invalidate. 
*/ /* * cache_clear() semantics: Clear any cache entries for the area in question, * without writing back dirty entries first. This is useful if the data will * be overwritten anyway, e.g. by DMA to memory. The range is defined by a * _physical_ address. */ void cache_clear (unsigned long paddr, int len) { if (CPU_IS_COLDFIRE) { flush_cf_bcache(0, DCACHE_MAX_ADDR); } else if (CPU_IS_040_OR_060) { int tmp; /* * We need special treatment for the first page, in case it * is not page-aligned. Page align the addresses to work * around bug I17 in the 68060. */ if ((tmp = -paddr & (PAGE_SIZE - 1))) { pushcl040(paddr & PAGE_MASK); if ((len -= tmp) <= 0) return; paddr += tmp; } tmp = PAGE_SIZE; paddr &= PAGE_MASK; while ((len -= tmp) >= 0) { clear040(paddr); paddr += tmp; } if ((len += tmp)) /* a page boundary gets crossed at the end */ pushcl040(paddr); } else /* 68030 or 68020 */ asm volatile ("movec %/cacr,%/d0\n\t" "oriw %0,%/d0\n\t" "movec %/d0,%/cacr" : : "i" (FLUSH_I_AND_D) : "d0"); #ifdef CONFIG_M68K_L2_CACHE if(mach_l2_flush) mach_l2_flush(0); #endif } EXPORT_SYMBOL(cache_clear); /* * cache_push() semantics: Write back any dirty cache data in the given area, * and invalidate the range in the instruction cache. It needs not (but may) * invalidate those entries also in the data cache. The range is defined by a * _physical_ address. */ void cache_push (unsigned long paddr, int len) { if (CPU_IS_COLDFIRE) { flush_cf_bcache(0, DCACHE_MAX_ADDR); } else if (CPU_IS_040_OR_060) { int tmp = PAGE_SIZE; /* * on 68040 or 68060, push cache lines for pages in the range; * on the '040 this also invalidates the pushed lines, but not on * the '060! */ len += paddr & (PAGE_SIZE - 1); /* * Work around bug I17 in the 68060 affecting some instruction * lines not being invalidated properly. */ paddr &= PAGE_MASK; do { push040(paddr); paddr += tmp; } while ((len -= tmp) > 0); } /* * 68030/68020 have no writeback cache. 
On the other hand, * cache_push is actually a superset of cache_clear (the lines * get written back and invalidated), so we should make sure * to perform the corresponding actions. After all, this is getting * called in places where we've just loaded code, or whatever, so * flushing the icache is appropriate; flushing the dcache shouldn't * be required. */ else /* 68030 or 68020 */ asm volatile ("movec %/cacr,%/d0\n\t" "oriw %0,%/d0\n\t" "movec %/d0,%/cacr" : : "i" (FLUSH_I) : "d0"); #ifdef CONFIG_M68K_L2_CACHE if(mach_l2_flush) mach_l2_flush(1); #endif } EXPORT_SYMBOL(cache_push);
gpl-2.0