repo_name
string
path
string
copies
string
size
string
content
string
license
string
android-armv7a-belalang-tempur/Android_SpeedKernel_3.4
fs/afs/super.c
4625
12575
/* AFS superblock handling
 *
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Howells <dhowells@redhat.com>
 *          David Woodhouse <dwmw2@infradead.org>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/statfs.h>
#include <linux/sched.h>
#include "internal.h"

#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */

static void afs_i_init_once(void *foo);
static struct dentry *afs_mount(struct file_system_type *fs_type,
		      int flags, const char *dev_name, void *data);
static void afs_kill_super(struct super_block *sb);
static struct inode *afs_alloc_inode(struct super_block *sb);
static void afs_destroy_inode(struct inode *inode);
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);

struct file_system_type afs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "afs",
	.mount		= afs_mount,
	.kill_sb	= afs_kill_super,
	.fs_flags	= 0,
};

static const struct super_operations afs_super_ops = {
	.statfs		= afs_statfs,
	.alloc_inode	= afs_alloc_inode,
	.drop_inode	= afs_drop_inode,
	.destroy_inode	= afs_destroy_inode,
	.evict_inode	= afs_evict_inode,
	.show_options	= generic_show_options,
};

/* slab cache for struct afs_vnode (the AFS inode wrapper) */
static struct kmem_cache *afs_inode_cachep;
/* count of live vnodes; checked at module unload to catch leaks */
static atomic_t afs_count_active_inodes;

/* mount option tokens */
enum {
	afs_no_opt,
	afs_opt_cell,
	afs_opt_rwpath,
	afs_opt_vol,
	afs_opt_autocell,
};

static const match_table_t afs_options_list = {
	{ afs_opt_cell,		"cell=%s"	},
	{ afs_opt_rwpath,	"rwpath"	},
	{ afs_opt_vol,		"vol=%s"	},
	{ afs_opt_autocell,	"autocell"	},
	{ afs_no_opt,		NULL		}
};

/*
 * initialise the filesystem
 * - creates the vnode slab cache and registers the "afs" fs type
 * - returns 0 or a negative errno (cache creation / registration failure)
 */
int __init afs_fs_init(void)
{
	int ret;

	_enter("");

	/* create ourselves an inode cache */
	atomic_set(&afs_count_active_inodes, 0);

	ret = -ENOMEM;
	afs_inode_cachep = kmem_cache_create("afs_inode_cache",
					     sizeof(struct afs_vnode),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     afs_i_init_once);
	if (!afs_inode_cachep) {
		printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
		return ret;
	}

	/* now export our filesystem to lesser mortals */
	ret = register_filesystem(&afs_fs_type);
	if (ret < 0) {
		/* registration failed: undo the cache creation */
		kmem_cache_destroy(afs_inode_cachep);
		_leave(" = %d", ret);
		return ret;
	}

	_leave(" = 0");
	return 0;
}

/*
 * clean up the filesystem
 * - BUGs if any vnodes are still live, since destroying the slab cache
 *   with objects outstanding would be memory corruption waiting to happen
 */
void __exit afs_fs_exit(void)
{
	_enter("");

	afs_mntpt_kill_timer();
	unregister_filesystem(&afs_fs_type);

	if (atomic_read(&afs_count_active_inodes) != 0) {
		printk("kAFS: %d active inode objects still present\n",
		       atomic_read(&afs_count_active_inodes));
		BUG();
	}

	kmem_cache_destroy(afs_inode_cachep);
	_leave("");
}

/*
 * parse the mount options
 * - this function has been shamelessly adapted from the ext3 fs which
 *   shamelessly adapted it from the msdos fs
 * - fills in params->cell/rwpath/autocell and may redirect *devname to a
 *   "vol=" option value; returns 0 or a negative errno
 */
static int afs_parse_options(struct afs_mount_params *params,
			     char *options, const char **devname)
{
	struct afs_cell *cell;
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int token;

	_enter("%s", options);

	/* force NUL termination; assumes the mount data buffer is a full
	 * page as handed over by the VFS — TODO confirm for all callers */
	options[PAGE_SIZE - 1] = 0;

	while ((p = strsep(&options, ","))) {
		if (!*p)
			continue;

		token = match_token(p, afs_options_list, args);
		switch (token) {
		case afs_opt_cell:
			cell = afs_cell_lookup(args[0].from,
					       args[0].to - args[0].from,
					       false);
			if (IS_ERR(cell))
				return PTR_ERR(cell);
			/* drop any cell ref taken by a previous option */
			afs_put_cell(params->cell);
			params->cell = cell;
			break;

		case afs_opt_rwpath:
			params->rwpath = 1;
			break;

		case afs_opt_vol:
			*devname = args[0].from;
			break;

		case afs_opt_autocell:
			params->autocell = 1;
			break;

		default:
			printk(KERN_ERR "kAFS:"
			       " Unknown or invalid mount option: '%s'\n", p);
			return -EINVAL;
		}
	}

	_leave(" = 0");
	return 0;
}

/*
 * parse a device name to get cell name, volume name, volume type and R/W
 * selector
 * - this can be one of the following:
 *	"%[cell:]volume[.]"		R/W volume
 *	"#[cell:]volume[.]"		R/O or R/W volume (rwpath=0),
 *					 or R/W (rwpath=1) volume
 *	"%[cell:]volume.readonly"	R/O volume
 *	"#[cell:]volume.readonly"	R/O volume
 *	"%[cell:]volume.backup"		Backup volume
 *	"#[cell:]volume.backup"		Backup volume
 * - fills in params->type/force/volname/volnamesz/cell;
 *   returns 0 or a negative errno
 */
static int afs_parse_device_name(struct afs_mount_params *params,
				 const char *name)
{
	struct afs_cell *cell;
	const char *cellname, *suffix;
	int cellnamesz;

	_enter(",%s", name);

	if (!name) {
		printk(KERN_ERR "kAFS: no volume name specified\n");
		return -EINVAL;
	}

	/* name must begin with '%' or '#' and have at least one more char */
	if ((name[0] != '%' && name[0] != '#') || !name[1]) {
		printk(KERN_ERR "kAFS: unparsable volume name\n");
		return -EINVAL;
	}

	/* determine the type of volume we're looking for */
	params->type = AFSVL_ROVOL;
	params->force = false;
	if (params->rwpath || name[0] == '%') {
		params->type = AFSVL_RWVOL;
		params->force = true;
	}
	name++;

	/* split the cell name out if there is one */
	params->volname = strchr(name, ':');
	if (params->volname) {
		cellname = name;
		cellnamesz = params->volname - name;
		params->volname++;
	} else {
		params->volname = name;
		cellname = NULL;
		cellnamesz = 0;
	}

	/* the volume type is further affected by a possible suffix */
	suffix = strrchr(params->volname, '.');
	if (suffix) {
		if (strcmp(suffix, ".readonly") == 0) {
			params->type = AFSVL_ROVOL;
			params->force = true;
		} else if (strcmp(suffix, ".backup") == 0) {
			params->type = AFSVL_BACKVOL;
			params->force = true;
		} else if (suffix[1] == 0) {
			/* a bare trailing '.' is allowed and ignored */
		} else {
			/* not a recognised suffix: part of the volume name */
			suffix = NULL;
		}
	}

	params->volnamesz = suffix ?
		suffix - params->volname : strlen(params->volname);

	_debug("cell %*.*s [%p]",
	       cellnamesz, cellnamesz, cellname ?: "", params->cell);

	/* lookup the cell record */
	if (cellname || !params->cell) {
		cell = afs_cell_lookup(cellname, cellnamesz, true);
		if (IS_ERR(cell)) {
			printk(KERN_ERR "kAFS: unable to lookup cell '%*.*s'\n",
			       cellnamesz, cellnamesz, cellname ?: "");
			return PTR_ERR(cell);
		}
		afs_put_cell(params->cell);
		params->cell = cell;
	}

	_debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
	       params->cell->name, params->cell,
	       params->volnamesz, params->volnamesz, params->volname,
	       suffix ?: "-", params->type, params->force ? " FORCE" : "");

	return 0;
}

/*
 * check a superblock to see if it's the one we're looking for
 * - superblocks are shared per-volume (sget() comparison callback)
 */
static int afs_test_super(struct super_block *sb, void *data)
{
	struct afs_super_info *as1 = data;
	struct afs_super_info *as = sb->s_fs_info;

	return as->volume == as1->volume;
}

/* sget() setup callback: attach the afs_super_info to a new superblock */
static int afs_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}

/*
 * fill in the superblock
 * - instantiates the root vnode (vnode 1 of the volume) and root dentry;
 *   returns 0 or a negative errno
 */
static int afs_fill_super(struct super_block *sb,
			  struct afs_mount_params *params)
{
	struct afs_super_info *as = sb->s_fs_info;
	struct afs_fid fid;
	struct inode *inode = NULL;
	int ret;

	_enter("");

	/* fill in the superblock */
	sb->s_blocksize		= PAGE_CACHE_SIZE;
	sb->s_blocksize_bits	= PAGE_CACHE_SHIFT;
	sb->s_magic		= AFS_FS_MAGIC;
	sb->s_op		= &afs_super_ops;
	sb->s_bdi		= &as->volume->bdi;
	strlcpy(sb->s_id, as->volume->vlocation->vldb.name, sizeof(sb->s_id));

	/* allocate the root inode and dentry */
	fid.vid		= as->volume->vid;
	fid.vnode	= 1;
	fid.unique	= 1;
	inode = afs_iget(sb, params->key, &fid, NULL, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (params->autocell)
		set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);

	ret = -ENOMEM;
	/* d_make_root() consumes the inode ref even on failure */
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto error;

	sb->s_d_op = &afs_fs_dentry_operations;

	_leave(" = 0");
	return 0;

error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * get an AFS superblock
 */
static struct dentry *afs_mount(struct file_system_type *fs_type,
		      int flags, const char *dev_name, void *options)
{
	struct afs_mount_params params;
	struct super_block *sb;
	struct afs_volume *vol;
	struct key *key;
	char *new_opts = kstrdup(options, GFP_KERNEL);
	struct afs_super_info *as;
	int ret;

	_enter(",,%s,%p", dev_name, options);

	memset(&params, 0, sizeof(params));

	/* parse the options and device name */
	if (options) {
		ret = afs_parse_options(&params, options, &dev_name);
		if (ret < 0)
			goto error;
	}

	ret = afs_parse_device_name(&params, dev_name);
	if (ret < 0)
		goto error;

	/* try and do the mount securely */
	key = afs_request_key(params.cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		ret = PTR_ERR(key);
		goto error;
	}
	params.key = key;

	/* parse the device name */
	vol = afs_volume_lookup(&params);
	if (IS_ERR(vol)) {
		ret = PTR_ERR(vol);
		goto error;
	}

	/* allocate a superblock info record */
	as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
	if (!as) {
		ret = -ENOMEM;
		afs_put_volume(vol);
		goto error;
	}
	as->volume = vol;

	/* allocate a deviceless superblock; sget() either creates a new
	 * one (taking ownership of 'as' via afs_set_super) or returns an
	 * existing sb for the same volume (afs_test_super) */
	sb = sget(fs_type, afs_test_super, afs_set_super, as);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		afs_put_volume(vol);
		kfree(as);
		goto error;
	}

	if (!sb->s_root) {
		/* initial superblock/root creation */
		_debug("create");
		sb->s_flags = flags;
		ret = afs_fill_super(sb, &params);
		if (ret < 0) {
			deactivate_locked_super(sb);
			goto error;
		}
		save_mount_options(sb, new_opts);
		sb->s_flags |= MS_ACTIVE;
	} else {
		/* sget() matched an already-active superblock: our volume
		 * ref and info record are surplus */
		_debug("reuse");
		ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
		afs_put_volume(vol);
		kfree(as);
	}

	/* NOTE(review): params.key is not released on this success path,
	 * only on error — presumably the reference is held for the life of
	 * the mount via afs_iget/volume; confirm against internal.h */
	afs_put_cell(params.cell);
	kfree(new_opts);
	_leave(" = 0 [%p]", sb);
	return dget(sb->s_root);

error:
	afs_put_cell(params.cell);
	key_put(params.key);
	kfree(new_opts);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/* tear down a superblock and drop its volume reference */
static void afs_kill_super(struct super_block *sb)
{
	struct afs_super_info *as = sb->s_fs_info;

	kill_anon_super(sb);
	afs_put_volume(as->volume);
	kfree(as);
}

/*
 * initialise an inode cache slab element prior to any use
 * - slab constructor: runs once per slab object, not once per allocation,
 *   so only invariant state (locks, list heads, work items) goes here
 */
static void afs_i_init_once(void *_vnode)
{
	struct afs_vnode *vnode = _vnode;

	memset(vnode, 0, sizeof(*vnode));
	inode_init_once(&vnode->vfs_inode);
	init_waitqueue_head(&vnode->update_waitq);
	mutex_init(&vnode->permits_lock);
	mutex_init(&vnode->validate_lock);
	spin_lock_init(&vnode->writeback_lock);
	spin_lock_init(&vnode->lock);
	INIT_LIST_HEAD(&vnode->writebacks);
	INIT_LIST_HEAD(&vnode->pending_locks);
	INIT_LIST_HEAD(&vnode->granted_locks);
	INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
	INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
}

/*
 * allocate an AFS inode struct from our slab cache
 * - per-allocation state is reset here (the constructor above only runs
 *   on fresh slab pages)
 */
static struct inode *afs_alloc_inode(struct super_block *sb)
{
	struct afs_vnode *vnode;

	vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
	if (!vnode)
		return NULL;

	atomic_inc(&afs_count_active_inodes);

	memset(&vnode->fid, 0, sizeof(vnode->fid));
	memset(&vnode->status, 0, sizeof(vnode->status));

	vnode->volume		= NULL;
	vnode->update_cnt	= 0;
	vnode->flags		= 1 << AFS_VNODE_UNSET;
	vnode->cb_promised	= false;

	_leave(" = %p", &vnode->vfs_inode);
	return &vnode->vfs_inode;
}

/* RCU callback: actually free the vnode after the grace period */
static void afs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	kmem_cache_free(afs_inode_cachep, vnode);
}

/*
 * destroy an AFS inode struct
 */
static void afs_destroy_inode(struct inode *inode)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode);

	_debug("DESTROY INODE %p", inode);

	ASSERTCMP(vnode->server, ==, NULL);

	call_rcu(&inode->i_rcu, afs_i_callback);
	atomic_dec(&afs_count_active_inodes);
}

/*
 * return information about an AFS volume
 * - queries the server for the volume status under the caller's key
 */
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct afs_volume_status vs;
	struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
	struct key *key;
	int ret;

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key))
		return PTR_ERR(key);

	ret = afs_vnode_get_volume_status(vnode, key, &vs);
	key_put(key);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	buf->f_type	= dentry->d_sb->s_magic;
	buf->f_bsize	= AFS_BLOCK_SIZE;
	buf->f_namelen	= AFSNAMEMAX - 1;

	/* a quota of 0 means "unlimited": fall back to partition size */
	if (vs.max_quota == 0)
		buf->f_blocks = vs.part_max_blocks;
	else
		buf->f_blocks = vs.max_quota;
	buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use;
	return 0;
}
gpl-2.0
jamieg71/kernel
net/nfc/nci/lib.c
4881
2213
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 *  This file is based on lib.c, which was written
 *  by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

#include <net/nfc/nci.h>

/* NCI status codes to Unix errno mapping */
/*
 * nci_to_errno - translate an NCI wire status byte into a negative errno
 * @code: NCI_STATUS_* value received from the controller
 *
 * Returns 0 for NCI_STATUS_OK; unknown codes fall through to -ENOSYS
 * together with NCI_STATUS_FAILED (the protocol's catch-all failure).
 */
int nci_to_errno(__u8 code)
{
	switch (code) {
	case NCI_STATUS_OK:
		return 0;

	case NCI_STATUS_REJECTED:
		return -EBUSY;

	case NCI_STATUS_RF_FRAME_CORRUPTED:
		return -EBADMSG;

	case NCI_STATUS_NOT_INITIALIZED:
		return -EHOSTDOWN;

	case NCI_STATUS_SYNTAX_ERROR:
	case NCI_STATUS_SEMANTIC_ERROR:
	case NCI_STATUS_INVALID_PARAM:
	case NCI_STATUS_RF_PROTOCOL_ERROR:
	case NCI_STATUS_NFCEE_PROTOCOL_ERROR:
		return -EPROTO;

	case NCI_STATUS_UNKNOWN_GID:
	case NCI_STATUS_UNKNOWN_OID:
		return -EBADRQC;

	case NCI_STATUS_MESSAGE_SIZE_EXCEEDED:
		return -EMSGSIZE;

	case NCI_STATUS_DISCOVERY_ALREADY_STARTED:
		return -EALREADY;

	case NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED:
	case NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED:
		return -ECONNREFUSED;

	case NCI_STATUS_RF_TRANSMISSION_ERROR:
	case NCI_STATUS_NFCEE_TRANSMISSION_ERROR:
		return -ECOMM;

	case NCI_STATUS_RF_TIMEOUT_ERROR:
	case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
		return -ETIMEDOUT;

	case NCI_STATUS_FAILED:
	default:
		return -ENOSYS;
	}
}
EXPORT_SYMBOL(nci_to_errno);
gpl-2.0
g7755725/Fitsugly
fs/ocfs2/move_extents.c
4881
27875
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * move_extents.c * * Copyright (C) 2011 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/fs.h> #include <linux/types.h> #include <linux/mount.h> #include <linux/swap.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "ocfs2_ioctl.h" #include "alloc.h" #include "aops.h" #include "dlmglue.h" #include "extent_map.h" #include "inode.h" #include "journal.h" #include "suballoc.h" #include "uptodate.h" #include "super.h" #include "dir.h" #include "buffer_head_io.h" #include "sysfile.h" #include "refcounttree.h" #include "move_extents.h" struct ocfs2_move_extents_context { struct inode *inode; struct file *file; int auto_defrag; int partial; int credits; u32 new_phys_cpos; u32 clusters_moved; u64 refcount_loc; struct ocfs2_move_extents *range; struct ocfs2_extent_tree et; struct ocfs2_alloc_context *meta_ac; struct ocfs2_alloc_context *data_ac; struct ocfs2_cached_dealloc_ctxt dealloc; }; static int __ocfs2_move_extent(handle_t *handle, struct ocfs2_move_extents_context *context, u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos, int ext_flags) { int ret = 0, index; struct inode *inode = context->inode; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_rec *rec, replace_rec; struct ocfs2_path *path = NULL; struct ocfs2_extent_list *el; u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, p_cpos, new_p_cpos, len); if (ret) 
{ mlog_errno(ret); goto out; } memset(&replace_rec, 0, sizeof(replace_rec)); replace_rec.e_cpos = cpu_to_le32(cpos); replace_rec.e_leaf_clusters = cpu_to_le16(len); replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb, new_p_cpos)); path = ocfs2_new_path_from_et(&context->et); if (!path) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos); if (ret) { mlog_errno(ret); goto out; } el = path_leaf_el(path); index = ocfs2_search_extent_list(el, cpos); if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { ocfs2_error(inode->i_sb, "Inode %llu has an extent at cpos %u which can no " "longer be found.\n", (unsigned long long)ino, cpos); ret = -EROFS; goto out; } rec = &el->l_recs[index]; BUG_ON(ext_flags != rec->e_flags); /* * after moving/defraging to new location, the extent is not going * to be refcounted anymore. */ replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), context->et.et_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_split_extent(handle, &context->et, path, index, &replace_rec, context->meta_ac, &context->dealloc); if (ret) { mlog_errno(ret); goto out; } ocfs2_journal_dirty(handle, context->et.et_root_bh); context->new_phys_cpos = new_p_cpos; /* * need I to append truncate log for old clusters? */ if (old_blkno) { if (ext_flags & OCFS2_EXT_REFCOUNTED) ret = ocfs2_decrease_refcount(inode, handle, ocfs2_blocks_to_clusters(osb->sb, old_blkno), len, context->meta_ac, &context->dealloc, 1); else ret = ocfs2_truncate_log_append(osb, handle, old_blkno, len); } out: return ret; } /* * lock allocators, and reserving appropriate number of bits for * meta blocks and data clusters. * * in some cases, we don't need to reserve clusters, just let data_ac * be NULL. 
*/ static int ocfs2_lock_allocators_move_extents(struct inode *inode, struct ocfs2_extent_tree *et, u32 clusters_to_move, u32 extents_to_split, struct ocfs2_alloc_context **meta_ac, struct ocfs2_alloc_context **data_ac, int extra_blocks, int *credits) { int ret, num_free_extents; unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); num_free_extents = ocfs2_num_free_extents(osb, et); if (num_free_extents < 0) { ret = num_free_extents; mlog_errno(ret); goto out; } if (!num_free_extents || (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) extra_blocks += ocfs2_extend_meta_needed(et->et_root_el); ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac); if (ret) { mlog_errno(ret); goto out; } if (data_ac) { ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac); if (ret) { mlog_errno(ret); goto out; } } *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el, clusters_to_move + 2); mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n", extra_blocks, clusters_to_move, *credits); out: if (ret) { if (*meta_ac) { ocfs2_free_alloc_context(*meta_ac); *meta_ac = NULL; } } return ret; } /* * Using one journal handle to guarantee the data consistency in case * crash happens anywhere. * * XXX: defrag can end up with finishing partial extent as requested, * due to not enough contiguous clusters can be found in allocator. 
*/ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, u32 cpos, u32 phys_cpos, u32 *len, int ext_flags) { int ret, credits = 0, extra_blocks = 0, partial = context->partial; handle_t *handle; struct inode *inode = context->inode; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_refcount_tree *ref_tree = NULL; u32 new_phys_cpos, new_len; u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) { BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); BUG_ON(!context->refcount_loc); ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, &ref_tree, NULL); if (ret) { mlog_errno(ret); return ret; } ret = ocfs2_prepare_refcount_change_for_del(inode, context->refcount_loc, phys_blkno, *len, &credits, &extra_blocks); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1, &context->meta_ac, &context->data_ac, extra_blocks, &credits); if (ret) { mlog_errno(ret); goto out; } /* * should be using allocation reservation strategy there? * * if (context->data_ac) * context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; */ mutex_lock(&tl_inode->i_mutex); if (ocfs2_truncate_log_needs_flush(osb)) { ret = __ocfs2_flush_truncate_log(osb); if (ret < 0) { mlog_errno(ret); goto out_unlock_mutex; } } handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock_mutex; } ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len, &new_phys_cpos, &new_len); if (ret) { mlog_errno(ret); goto out_commit; } /* * allowing partial extent moving is kind of 'pros and cons', it makes * whole defragmentation less likely to fail, on the contrary, the bad * thing is it may make the fs even more fragmented after moving, let * userspace make a good decision here. 
*/ if (new_len != *len) { mlog(0, "len_claimed: %u, len: %u\n", new_len, *len); if (!partial) { context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE; ret = -ENOSPC; goto out_commit; } } mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos, phys_cpos, new_phys_cpos); ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos, new_phys_cpos, ext_flags); if (ret) mlog_errno(ret); if (partial && (new_len != *len)) *len = new_len; /* * Here we should write the new page out first if we are * in write-back mode. */ ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len); if (ret) mlog_errno(ret); out_commit: ocfs2_commit_trans(osb, handle); out_unlock_mutex: mutex_unlock(&tl_inode->i_mutex); if (context->data_ac) { ocfs2_free_alloc_context(context->data_ac); context->data_ac = NULL; } if (context->meta_ac) { ocfs2_free_alloc_context(context->meta_ac); context->meta_ac = NULL; } out: if (ref_tree) ocfs2_unlock_refcount_tree(osb, ref_tree, 1); return ret; } /* * find the victim alloc group, where #blkno fits. 
*/ static int ocfs2_find_victim_alloc_group(struct inode *inode, u64 vict_blkno, int type, int slot, int *vict_bit, struct buffer_head **ret_bh) { int ret, i, bits_per_unit = 0; u64 blkno; char namebuf[40]; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *ac_bh = NULL, *gd_bh = NULL; struct ocfs2_chain_list *cl; struct ocfs2_chain_rec *rec; struct ocfs2_dinode *ac_dinode; struct ocfs2_group_desc *bg; ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot); ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf, strlen(namebuf), &blkno); if (ret) { ret = -ENOENT; goto out; } ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh); if (ret) { mlog_errno(ret); goto out; } ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data; cl = &(ac_dinode->id2.i_chain); rec = &(cl->cl_recs[0]); if (type == GLOBAL_BITMAP_SYSTEM_INODE) bits_per_unit = osb->s_clustersize_bits - inode->i_sb->s_blocksize_bits; /* * 'vict_blkno' was out of the valid range. */ if ((vict_blkno < le64_to_cpu(rec->c_blkno)) || (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) << bits_per_unit))) { ret = -EINVAL; goto out; } for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) { rec = &(cl->cl_recs[i]); if (!rec) continue; bg = NULL; do { if (!bg) blkno = le64_to_cpu(rec->c_blkno); else blkno = le64_to_cpu(bg->bg_next_group); if (gd_bh) { brelse(gd_bh); gd_bh = NULL; } ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh); if (ret) { mlog_errno(ret); goto out; } bg = (struct ocfs2_group_desc *)gd_bh->b_data; if (vict_blkno < (le64_to_cpu(bg->bg_blkno) + le16_to_cpu(bg->bg_bits))) { *ret_bh = gd_bh; *vict_bit = (vict_blkno - blkno) >> bits_per_unit; mlog(0, "find the victim group: #%llu, " "total_bits: %u, vict_bit: %u\n", blkno, le16_to_cpu(bg->bg_bits), *vict_bit); goto out; } } while (le64_to_cpu(bg->bg_next_group)); } ret = -EINVAL; out: brelse(ac_bh); /* * caller has to release the gd_bh properly. 
*/ return ret; } /* * XXX: helper to validate and adjust moving goal. */ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode, struct ocfs2_move_extents *range) { int ret, goal_bit = 0; struct buffer_head *gd_bh = NULL; struct ocfs2_group_desc *bg = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int c_to_b = 1 << (osb->s_clustersize_bits - inode->i_sb->s_blocksize_bits); /* * make goal become cluster aligned. */ range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb, range->me_goal); /* * moving goal is not allowd to start with a group desc blok(#0 blk) * let's compromise to the latter cluster. */ if (range->me_goal == le64_to_cpu(bg->bg_blkno)) range->me_goal += c_to_b; /* * validate goal sits within global_bitmap, and return the victim * group desc */ ret = ocfs2_find_victim_alloc_group(inode, range->me_goal, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT, &goal_bit, &gd_bh); if (ret) goto out; bg = (struct ocfs2_group_desc *)gd_bh->b_data; /* * movement is not gonna cross two groups. */ if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize < range->me_len) { ret = -EINVAL; goto out; } /* * more exact validations/adjustments will be performed later during * moving operation for each extent range. */ mlog(0, "extents get ready to be moved to #%llu block\n", range->me_goal); out: brelse(gd_bh); return ret; } static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh, int *goal_bit, u32 move_len, u32 max_hop, u32 *phys_cpos) { int i, used, last_free_bits = 0, base_bit = *goal_bit; struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb, le64_to_cpu(gd->bg_blkno)); for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) { used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap); if (used) { /* * we even tried searching the free chunk by jumping * a 'max_hop' distance, but still failed. 
*/ if ((i - base_bit) > max_hop) { *phys_cpos = 0; break; } if (last_free_bits) last_free_bits = 0; continue; } else last_free_bits++; if (last_free_bits == move_len) { *goal_bit = i; *phys_cpos = base_cpos + i; break; } } mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos); } static int ocfs2_alloc_dinode_update_counts(struct inode *inode, handle_t *handle, struct buffer_head *di_bh, u32 num_bits, u16 chain) { int ret; u32 tmp_used; struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out; } tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used); le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits); ocfs2_journal_dirty(handle, di_bh); out: return ret; } static inline int ocfs2_block_group_set_bits(handle_t *handle, struct inode *alloc_inode, struct ocfs2_group_desc *bg, struct buffer_head *group_bh, unsigned int bit_off, unsigned int num_bits) { int status; void *bitmap = bg->bg_bitmap; int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; /* All callers get the descriptor via * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off, num_bits); if (ocfs2_is_cluster_bitmap(alloc_inode)) journal_type = OCFS2_JOURNAL_ACCESS_UNDO; status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), group_bh, journal_type); if (status < 0) { mlog_errno(status); goto bail; } le16_add_cpu(&bg->bg_free_bits_count, -num_bits); if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) { ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit" " count %u but claims %u are freed. 
num_bits %d", (unsigned long long)le64_to_cpu(bg->bg_blkno), le16_to_cpu(bg->bg_bits), le16_to_cpu(bg->bg_free_bits_count), num_bits); return -EROFS; } while (num_bits--) ocfs2_set_bit(bit_off++, bitmap); ocfs2_journal_dirty(handle, group_bh); bail: return status; } static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, u32 cpos, u32 phys_cpos, u32 *new_phys_cpos, u32 len, int ext_flags) { int ret, credits = 0, extra_blocks = 0, goal_bit = 0; handle_t *handle; struct inode *inode = context->inode; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; struct inode *gb_inode = NULL; struct buffer_head *gb_bh = NULL; struct buffer_head *gd_bh = NULL; struct ocfs2_group_desc *gd; struct ocfs2_refcount_tree *ref_tree = NULL; u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb, context->range->me_threshold); u64 phys_blkno, new_phys_blkno; phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) { BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); BUG_ON(!context->refcount_loc); ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, &ref_tree, NULL); if (ret) { mlog_errno(ret); return ret; } ret = ocfs2_prepare_refcount_change_for_del(inode, context->refcount_loc, phys_blkno, len, &credits, &extra_blocks); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1, &context->meta_ac, NULL, extra_blocks, &credits); if (ret) { mlog_errno(ret); goto out; } /* * need to count 2 extra credits for global_bitmap inode and * group descriptor. */ credits += OCFS2_INODE_UPDATE_CREDITS + 1; /* * ocfs2_move_extent() didn't reserve any clusters in lock_allocators() * logic, while we still need to lock the global_bitmap. 
*/ gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT); if (!gb_inode) { mlog(ML_ERROR, "unable to get global_bitmap inode\n"); ret = -EIO; goto out; } mutex_lock(&gb_inode->i_mutex); ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1); if (ret) { mlog_errno(ret); goto out_unlock_gb_mutex; } mutex_lock(&tl_inode->i_mutex); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_unlock_tl_inode; } new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos); ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno, GLOBAL_BITMAP_SYSTEM_INODE, OCFS2_INVALID_SLOT, &goal_bit, &gd_bh); if (ret) { mlog_errno(ret); goto out_commit; } /* * probe the victim cluster group to find a proper * region to fit wanted movement, it even will perfrom * a best-effort attempt by compromising to a threshold * around the goal. */ ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, new_phys_cpos); if (!*new_phys_cpos) { ret = -ENOSPC; goto out_commit; } ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos, *new_phys_cpos, ext_flags); if (ret) { mlog_errno(ret); goto out_commit; } gd = (struct ocfs2_group_desc *)gd_bh->b_data; ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len, le16_to_cpu(gd->bg_chain)); if (ret) { mlog_errno(ret); goto out_commit; } ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh, goal_bit, len); if (ret) mlog_errno(ret); /* * Here we should write the new page out first if we are * in write-back mode. 
 */
	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
	if (ret)
		mlog_errno(ret);

	/*
	 * Cleanup ladder: each label below undoes one acquisition from the
	 * setup path above (journal transaction, tl_inode mutex, gb_inode
	 * cluster lock and mutex), in reverse order of acquisition.
	 */
out_commit:
	ocfs2_commit_trans(osb, handle);
	brelse(gd_bh);

out_unlock_tl_inode:
	mutex_unlock(&tl_inode->i_mutex);

	ocfs2_inode_unlock(gb_inode, 1);
out_unlock_gb_mutex:
	mutex_unlock(&gb_inode->i_mutex);
	brelse(gb_bh);
	iput(gb_inode);

out:
	/* Release the reserved metadata allocation context, if any. */
	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}

	/* ref_tree was taken earlier (outside this hunk) for refcounted files. */
	if (ref_tree)
		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);

	return ret;
}

/*
 * Helper to calculate the defraging length in one run according to threshold.
 *
 * @alloc_size:   in: length (clusters) of the current extent; out: possibly
 *                reduced so this defrag cycle ends exactly at @threshold
 * @len_defraged: running total of clusters defragged in the current cycle;
 *                reset to 0 when a cycle completes
 * @threshold:    per-cycle defrag limit, in clusters
 * @skip:         set to 1 when a single extent alone already reaches the
 *                threshold and should be skipped entirely
 */
static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
					 u32 threshold, int *skip)
{
	if ((*alloc_size + *len_defraged) < threshold) {
		/*
		 * proceed defragmentation until we meet the thresh
		 */
		*len_defraged += *alloc_size;
	} else if (*len_defraged == 0) {
		/*
		 * XXX: skip a large extent.
		 */
		*skip = 1;
	} else {
		/*
		 * split this extent to coalesce with former pieces as
		 * to reach the threshold.
		 *
		 * we're done here with one cycle of defragmentation
		 * in a size of 'thresh', resetting 'len_defraged'
		 * forces a new defragmentation.
		 */
		*alloc_size = threshold - *len_defraged;
		*len_defraged = 0;
	}
}

/*
 * Walk the cluster range selected by context->range and, per extent, either
 * defrag it (auto_defrag) or move it toward the caller-supplied goal.
 */
static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
				      struct ocfs2_move_extents_context *context)
{
	int ret = 0, flags, do_defrag, skip = 0;
	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;

	struct inode *inode = context->inode;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_move_extents *range = context->range;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* Nothing to do for empty files or zero-length requests. */
	if ((inode->i_size == 0) || (range->me_len == 0))
		return 0;

	/* Inline data lives inside the inode block; there are no extents. */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&context->dealloc);

	/*
	 * TO-DO XXX:
	 *
	 * - xattr extents.
	 */

	do_defrag = context->auto_defrag;

	/*
	 * extents moving happens in unit of clusters, for the sake
	 * of simplicity, we may ignore two clusters where 'byte_start'
	 * and 'byte_start + len' were within.
*/ move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start); len_to_move = (range->me_start + range->me_len) >> osb->s_clustersize_bits; if (len_to_move >= move_start) len_to_move -= move_start; else len_to_move = 0; if (do_defrag) { defrag_thresh = range->me_threshold >> osb->s_clustersize_bits; if (defrag_thresh <= 1) goto done; } else new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, range->me_goal); mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, " "thresh: %u\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)range->me_start, (unsigned long long)range->me_len, move_start, len_to_move, defrag_thresh); cpos = move_start; while (len_to_move) { ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size, &flags); if (ret) { mlog_errno(ret); goto out; } if (alloc_size > len_to_move) alloc_size = len_to_move; /* * XXX: how to deal with a hole: * * - skip the hole of course * - force a new defragmentation */ if (!phys_cpos) { if (do_defrag) len_defraged = 0; goto next; } if (do_defrag) { ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged, defrag_thresh, &skip); /* * skip large extents */ if (skip) { skip = 0; goto next; } mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, " "alloc_size: %u, len_defraged: %u\n", cpos, phys_cpos, alloc_size, len_defraged); ret = ocfs2_defrag_extent(context, cpos, phys_cpos, &alloc_size, flags); } else { ret = ocfs2_move_extent(context, cpos, phys_cpos, &new_phys_cpos, alloc_size, flags); new_phys_cpos += alloc_size; } if (ret < 0) { mlog_errno(ret); goto out; } context->clusters_moved += alloc_size; next: cpos += alloc_size; len_to_move -= alloc_size; } done: range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE; out: range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb, context->clusters_moved); range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb, context->new_phys_cpos); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &context->dealloc); return ret; } static int 
ocfs2_move_extents(struct ocfs2_move_extents_context *context) { int status; handle_t *handle; struct inode *inode = context->inode; struct ocfs2_dinode *di; struct buffer_head *di_bh = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (!inode) return -ENOENT; if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return -EROFS; mutex_lock(&inode->i_mutex); /* * This prevents concurrent writes from other nodes */ status = ocfs2_rw_lock(inode, 1); if (status) { mlog_errno(status); goto out; } status = ocfs2_inode_lock(inode, &di_bh, 1); if (status) { mlog_errno(status); goto out_rw_unlock; } /* * rememer ip_xattr_sem also needs to be held if necessary */ down_write(&OCFS2_I(inode)->ip_alloc_sem); status = __ocfs2_move_extents_range(di_bh, context); up_write(&OCFS2_I(inode)->ip_alloc_sem); if (status) { mlog_errno(status); goto out_inode_unlock; } /* * We update ctime for these changes */ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto out_inode_unlock; } status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status) { mlog_errno(status); goto out_commit; } di = (struct ocfs2_dinode *)di_bh->b_data; inode->i_ctime = CURRENT_TIME; di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); ocfs2_journal_dirty(handle, di_bh); out_commit: ocfs2_commit_trans(osb, handle); out_inode_unlock: brelse(di_bh); ocfs2_inode_unlock(inode, 1); out_rw_unlock: ocfs2_rw_unlock(inode, 1); out: mutex_unlock(&inode->i_mutex); return status; } int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) { int status; struct inode *inode = filp->f_path.dentry->d_inode; struct ocfs2_move_extents range; struct ocfs2_move_extents_context *context = NULL; status = mnt_want_write_file(filp); if (status) return status; if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) goto out; if 
(inode->i_flags & (S_IMMUTABLE|S_APPEND)) { status = -EPERM; goto out; } context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS); if (!context) { status = -ENOMEM; mlog_errno(status); goto out; } context->inode = inode; context->file = filp; if (argp) { if (copy_from_user(&range, (struct ocfs2_move_extents *)argp, sizeof(range))) { status = -EFAULT; goto out; } } else { status = -EINVAL; goto out; } if (range.me_start > i_size_read(inode)) goto out; if (range.me_start + range.me_len > i_size_read(inode)) range.me_len = i_size_read(inode) - range.me_start; context->range = &range; if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) { context->auto_defrag = 1; /* * ok, the default theshold for the defragmentation * is 1M, since our maximum clustersize was 1M also. * any thought? */ if (!range.me_threshold) range.me_threshold = 1024 * 1024; if (range.me_threshold > i_size_read(inode)) range.me_threshold = i_size_read(inode); if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG) context->partial = 1; } else { /* * first best-effort attempt to validate and adjust the goal * (physical address in block), while it can't guarantee later * operation can succeed all the time since global_bitmap may * change a bit over time. */ status = ocfs2_validate_and_adjust_move_goal(inode, &range); if (status) goto out; } status = ocfs2_move_extents(context); if (status) mlog_errno(status); out: /* * movement/defragmentation may end up being partially completed, * that's the reason why we need to return userspace the finished * length and new_offset even if failure happens somewhere. */ if (argp) { if (copy_to_user((struct ocfs2_move_extents *)argp, &range, sizeof(range))) status = -EFAULT; } kfree(context); mnt_drop_write_file(filp); return status; }
gpl-2.0
garwynn/android_kernel_samsung_klte
sound/pci/ac97/ac97_pcm.c
5393
21270
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Universal interface for Audio Codec '97 * * For more details look to AC '97 component specification revision 2.2 * by Intel Corporation (http://developer.intel.com) and to datasheets * for specific codecs. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/export.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/control.h> #include <sound/ac97_codec.h> #include <sound/asoundef.h> #include "ac97_id.h" #include "ac97_local.h" /* * PCM support */ static unsigned char rate_reg_tables[2][4][9] = { { /* standard rates */ { /* 3&4 front, 7&8 rear, 6&9 center/lfe */ AC97_PCM_FRONT_DAC_RATE, /* slot 3 */ AC97_PCM_FRONT_DAC_RATE, /* slot 4 */ 0xff, /* slot 5 */ AC97_PCM_LFE_DAC_RATE, /* slot 6 */ AC97_PCM_SURR_DAC_RATE, /* slot 7 */ AC97_PCM_SURR_DAC_RATE, /* slot 8 */ AC97_PCM_LFE_DAC_RATE, /* slot 9 */ 0xff, /* slot 10 */ 0xff, /* slot 11 */ }, { /* 7&8 front, 6&9 rear, 10&11 center/lfe */ 0xff, /* slot 3 */ 0xff, /* slot 4 */ 0xff, /* slot 5 */ AC97_PCM_SURR_DAC_RATE, /* slot 6 */ AC97_PCM_FRONT_DAC_RATE, /* slot 7 */ AC97_PCM_FRONT_DAC_RATE, /* slot 8 */ AC97_PCM_SURR_DAC_RATE, /* slot 9 */ AC97_PCM_LFE_DAC_RATE, /* slot 10 */ 
AC97_PCM_LFE_DAC_RATE, /* slot 11 */ }, { /* 6&9 front, 10&11 rear, 3&4 center/lfe */ AC97_PCM_LFE_DAC_RATE, /* slot 3 */ AC97_PCM_LFE_DAC_RATE, /* slot 4 */ 0xff, /* slot 5 */ AC97_PCM_FRONT_DAC_RATE, /* slot 6 */ 0xff, /* slot 7 */ 0xff, /* slot 8 */ AC97_PCM_FRONT_DAC_RATE, /* slot 9 */ AC97_PCM_SURR_DAC_RATE, /* slot 10 */ AC97_PCM_SURR_DAC_RATE, /* slot 11 */ }, { /* 10&11 front, 3&4 rear, 7&8 center/lfe */ AC97_PCM_SURR_DAC_RATE, /* slot 3 */ AC97_PCM_SURR_DAC_RATE, /* slot 4 */ 0xff, /* slot 5 */ 0xff, /* slot 6 */ AC97_PCM_LFE_DAC_RATE, /* slot 7 */ AC97_PCM_LFE_DAC_RATE, /* slot 8 */ 0xff, /* slot 9 */ AC97_PCM_FRONT_DAC_RATE, /* slot 10 */ AC97_PCM_FRONT_DAC_RATE, /* slot 11 */ }, }, { /* double rates */ { /* 3&4 front, 7&8 front (t+1) */ AC97_PCM_FRONT_DAC_RATE, /* slot 3 */ AC97_PCM_FRONT_DAC_RATE, /* slot 4 */ 0xff, /* slot 5 */ 0xff, /* slot 6 */ AC97_PCM_FRONT_DAC_RATE, /* slot 7 */ AC97_PCM_FRONT_DAC_RATE, /* slot 8 */ 0xff, /* slot 9 */ 0xff, /* slot 10 */ 0xff, /* slot 11 */ }, { /* not specified in the specification */ 0xff, /* slot 3 */ 0xff, /* slot 4 */ 0xff, /* slot 5 */ 0xff, /* slot 6 */ 0xff, /* slot 7 */ 0xff, /* slot 8 */ 0xff, /* slot 9 */ 0xff, /* slot 10 */ 0xff, /* slot 11 */ }, { 0xff, /* slot 3 */ 0xff, /* slot 4 */ 0xff, /* slot 5 */ 0xff, /* slot 6 */ 0xff, /* slot 7 */ 0xff, /* slot 8 */ 0xff, /* slot 9 */ 0xff, /* slot 10 */ 0xff, /* slot 11 */ }, { 0xff, /* slot 3 */ 0xff, /* slot 4 */ 0xff, /* slot 5 */ 0xff, /* slot 6 */ 0xff, /* slot 7 */ 0xff, /* slot 8 */ 0xff, /* slot 9 */ 0xff, /* slot 10 */ 0xff, /* slot 11 */ } }}; /* FIXME: more various mappings for ADC? 
*/ static unsigned char rate_cregs[9] = { AC97_PCM_LR_ADC_RATE, /* 3 */ AC97_PCM_LR_ADC_RATE, /* 4 */ 0xff, /* 5 */ AC97_PCM_MIC_ADC_RATE, /* 6 */ 0xff, /* 7 */ 0xff, /* 8 */ 0xff, /* 9 */ 0xff, /* 10 */ 0xff, /* 11 */ }; static unsigned char get_slot_reg(struct ac97_pcm *pcm, unsigned short cidx, unsigned short slot, int dbl) { if (slot < 3) return 0xff; if (slot > 11) return 0xff; if (pcm->spdif) return AC97_SPDIF; /* pseudo register */ if (pcm->stream == SNDRV_PCM_STREAM_PLAYBACK) return rate_reg_tables[dbl][pcm->r[dbl].rate_table[cidx]][slot - 3]; else return rate_cregs[slot - 3]; } static int set_spdif_rate(struct snd_ac97 *ac97, unsigned short rate) { unsigned short old, bits, reg, mask; unsigned int sbits; if (! (ac97->ext_id & AC97_EI_SPDIF)) return -ENODEV; /* TODO: double rate support */ if (ac97->flags & AC97_CS_SPDIF) { switch (rate) { case 48000: bits = 0; break; case 44100: bits = 1 << AC97_SC_SPSR_SHIFT; break; default: /* invalid - disable output */ snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, 0); return -EINVAL; } reg = AC97_CSR_SPDIF; mask = 1 << AC97_SC_SPSR_SHIFT; } else { if (ac97->id == AC97_ID_CM9739 && rate != 48000) { snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, 0); return -EINVAL; } switch (rate) { case 44100: bits = AC97_SC_SPSR_44K; break; case 48000: bits = AC97_SC_SPSR_48K; break; case 32000: bits = AC97_SC_SPSR_32K; break; default: /* invalid - disable output */ snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, 0); return -EINVAL; } reg = AC97_SPDIF; mask = AC97_SC_SPSR_MASK; } mutex_lock(&ac97->reg_mutex); old = snd_ac97_read(ac97, reg) & mask; if (old != bits) { snd_ac97_update_bits_nolock(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, 0); snd_ac97_update_bits_nolock(ac97, reg, mask, bits); /* update the internal spdif bits */ sbits = ac97->spdif_status; if (sbits & IEC958_AES0_PROFESSIONAL) { sbits &= ~IEC958_AES0_PRO_FS; switch (rate) { case 44100: sbits |= IEC958_AES0_PRO_FS_44100; 
break; case 48000: sbits |= IEC958_AES0_PRO_FS_48000; break; case 32000: sbits |= IEC958_AES0_PRO_FS_32000; break; } } else { sbits &= ~(IEC958_AES3_CON_FS << 24); switch (rate) { case 44100: sbits |= IEC958_AES3_CON_FS_44100<<24; break; case 48000: sbits |= IEC958_AES3_CON_FS_48000<<24; break; case 32000: sbits |= IEC958_AES3_CON_FS_32000<<24; break; } } ac97->spdif_status = sbits; } snd_ac97_update_bits_nolock(ac97, AC97_EXTENDED_STATUS, AC97_EA_SPDIF, AC97_EA_SPDIF); mutex_unlock(&ac97->reg_mutex); return 0; } /** * snd_ac97_set_rate - change the rate of the given input/output. * @ac97: the ac97 instance * @reg: the register to change * @rate: the sample rate to set * * Changes the rate of the given input/output on the codec. * If the codec doesn't support VAR, the rate must be 48000 (except * for SPDIF). * * The valid registers are AC97_PMC_MIC_ADC_RATE, * AC97_PCM_FRONT_DAC_RATE, AC97_PCM_LR_ADC_RATE. * AC97_PCM_SURR_DAC_RATE and AC97_PCM_LFE_DAC_RATE are accepted * if the codec supports them. * AC97_SPDIF is accepted as a pseudo register to modify the SPDIF * status bits. * * Returns zero if successful, or a negative error code on failure. */ int snd_ac97_set_rate(struct snd_ac97 *ac97, int reg, unsigned int rate) { int dbl; unsigned int tmp; dbl = rate > 48000; if (dbl) { if (!(ac97->flags & AC97_DOUBLE_RATE)) return -EINVAL; if (reg != AC97_PCM_FRONT_DAC_RATE) return -EINVAL; } snd_ac97_update_power(ac97, reg, 1); switch (reg) { case AC97_PCM_MIC_ADC_RATE: if ((ac97->regs[AC97_EXTENDED_STATUS] & AC97_EA_VRM) == 0) /* MIC VRA */ if (rate != 48000) return -EINVAL; break; case AC97_PCM_FRONT_DAC_RATE: case AC97_PCM_LR_ADC_RATE: if ((ac97->regs[AC97_EXTENDED_STATUS] & AC97_EA_VRA) == 0) /* VRA */ if (rate != 48000 && rate != 96000) return -EINVAL; break; case AC97_PCM_SURR_DAC_RATE: if (! (ac97->scaps & AC97_SCAP_SURROUND_DAC)) return -EINVAL; break; case AC97_PCM_LFE_DAC_RATE: if (! 
(ac97->scaps & AC97_SCAP_CENTER_LFE_DAC)) return -EINVAL; break; case AC97_SPDIF: /* special case */ return set_spdif_rate(ac97, rate); default: return -EINVAL; } if (dbl) rate /= 2; tmp = (rate * ac97->bus->clock) / 48000; if (tmp > 65535) return -EINVAL; if ((ac97->ext_id & AC97_EI_DRA) && reg == AC97_PCM_FRONT_DAC_RATE) snd_ac97_update_bits(ac97, AC97_EXTENDED_STATUS, AC97_EA_DRA, dbl ? AC97_EA_DRA : 0); snd_ac97_update(ac97, reg, tmp & 0xffff); snd_ac97_read(ac97, reg); if ((ac97->ext_id & AC97_EI_DRA) && reg == AC97_PCM_FRONT_DAC_RATE) { /* Intel controllers require double rate data to be put in * slots 7+8 */ snd_ac97_update_bits(ac97, AC97_GENERAL_PURPOSE, AC97_GP_DRSS_MASK, dbl ? AC97_GP_DRSS_78 : 0); snd_ac97_read(ac97, AC97_GENERAL_PURPOSE); } return 0; } EXPORT_SYMBOL(snd_ac97_set_rate); static unsigned short get_pslots(struct snd_ac97 *ac97, unsigned char *rate_table, unsigned short *spdif_slots) { if (!ac97_is_audio(ac97)) return 0; if (ac97_is_rev22(ac97) || ac97_can_amap(ac97)) { unsigned short slots = 0; if (ac97_is_rev22(ac97)) { /* Note: it's simply emulation of AMAP behaviour */ u16 es; es = ac97->regs[AC97_EXTENDED_ID] &= ~AC97_EI_DACS_SLOT_MASK; switch (ac97->addr) { case 1: case 2: es |= (1<<AC97_EI_DACS_SLOT_SHIFT); break; case 3: es |= (2<<AC97_EI_DACS_SLOT_SHIFT); break; } snd_ac97_write_cache(ac97, AC97_EXTENDED_ID, es); } switch (ac97->addr) { case 0: slots |= (1<<AC97_SLOT_PCM_LEFT)|(1<<AC97_SLOT_PCM_RIGHT); if (ac97->scaps & AC97_SCAP_SURROUND_DAC) slots |= (1<<AC97_SLOT_PCM_SLEFT)|(1<<AC97_SLOT_PCM_SRIGHT); if (ac97->scaps & AC97_SCAP_CENTER_LFE_DAC) slots |= (1<<AC97_SLOT_PCM_CENTER)|(1<<AC97_SLOT_LFE); if (ac97->ext_id & AC97_EI_SPDIF) { if (!(ac97->scaps & AC97_SCAP_SURROUND_DAC)) *spdif_slots = (1<<AC97_SLOT_SPDIF_LEFT)|(1<<AC97_SLOT_SPDIF_RIGHT); else if (!(ac97->scaps & AC97_SCAP_CENTER_LFE_DAC)) *spdif_slots = (1<<AC97_SLOT_SPDIF_LEFT1)|(1<<AC97_SLOT_SPDIF_RIGHT1); else *spdif_slots = 
(1<<AC97_SLOT_SPDIF_LEFT2)|(1<<AC97_SLOT_SPDIF_RIGHT2); } *rate_table = 0; break; case 1: case 2: slots |= (1<<AC97_SLOT_PCM_SLEFT)|(1<<AC97_SLOT_PCM_SRIGHT); if (ac97->scaps & AC97_SCAP_SURROUND_DAC) slots |= (1<<AC97_SLOT_PCM_CENTER)|(1<<AC97_SLOT_LFE); if (ac97->ext_id & AC97_EI_SPDIF) { if (!(ac97->scaps & AC97_SCAP_SURROUND_DAC)) *spdif_slots = (1<<AC97_SLOT_SPDIF_LEFT1)|(1<<AC97_SLOT_SPDIF_RIGHT1); else *spdif_slots = (1<<AC97_SLOT_SPDIF_LEFT2)|(1<<AC97_SLOT_SPDIF_RIGHT2); } *rate_table = 1; break; case 3: slots |= (1<<AC97_SLOT_PCM_CENTER)|(1<<AC97_SLOT_LFE); if (ac97->ext_id & AC97_EI_SPDIF) *spdif_slots = (1<<AC97_SLOT_SPDIF_LEFT2)|(1<<AC97_SLOT_SPDIF_RIGHT2); *rate_table = 2; break; } return slots; } else { unsigned short slots; slots = (1<<AC97_SLOT_PCM_LEFT)|(1<<AC97_SLOT_PCM_RIGHT); if (ac97->scaps & AC97_SCAP_SURROUND_DAC) slots |= (1<<AC97_SLOT_PCM_SLEFT)|(1<<AC97_SLOT_PCM_SRIGHT); if (ac97->scaps & AC97_SCAP_CENTER_LFE_DAC) slots |= (1<<AC97_SLOT_PCM_CENTER)|(1<<AC97_SLOT_LFE); if (ac97->ext_id & AC97_EI_SPDIF) { if (!(ac97->scaps & AC97_SCAP_SURROUND_DAC)) *spdif_slots = (1<<AC97_SLOT_SPDIF_LEFT)|(1<<AC97_SLOT_SPDIF_RIGHT); else if (!(ac97->scaps & AC97_SCAP_CENTER_LFE_DAC)) *spdif_slots = (1<<AC97_SLOT_SPDIF_LEFT1)|(1<<AC97_SLOT_SPDIF_RIGHT1); else *spdif_slots = (1<<AC97_SLOT_SPDIF_LEFT2)|(1<<AC97_SLOT_SPDIF_RIGHT2); } *rate_table = 0; return slots; } } static unsigned short get_cslots(struct snd_ac97 *ac97) { unsigned short slots; if (!ac97_is_audio(ac97)) return 0; slots = (1<<AC97_SLOT_PCM_LEFT)|(1<<AC97_SLOT_PCM_RIGHT); slots |= (1<<AC97_SLOT_MIC); return slots; } static unsigned int get_rates(struct ac97_pcm *pcm, unsigned int cidx, unsigned short slots, int dbl) { int i, idx; unsigned int rates = ~0; unsigned char reg; for (i = 3; i < 12; i++) { if (!(slots & (1 << i))) continue; reg = get_slot_reg(pcm, cidx, i, dbl); switch (reg) { case AC97_PCM_FRONT_DAC_RATE: idx = AC97_RATES_FRONT_DAC; break; case AC97_PCM_SURR_DAC_RATE: idx = 
AC97_RATES_SURR_DAC; break; case AC97_PCM_LFE_DAC_RATE: idx = AC97_RATES_LFE_DAC; break; case AC97_PCM_LR_ADC_RATE: idx = AC97_RATES_ADC; break; case AC97_PCM_MIC_ADC_RATE: idx = AC97_RATES_MIC_ADC; break; default: idx = AC97_RATES_SPDIF; break; } rates &= pcm->r[dbl].codec[cidx]->rates[idx]; } if (!dbl) rates &= ~(SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000); return rates; } /** * snd_ac97_pcm_assign - assign AC97 slots to given PCM streams * @bus: the ac97 bus instance * @pcms_count: count of PCMs to be assigned * @pcms: PCMs to be assigned * * It assigns available AC97 slots for given PCMs. If none or only * some slots are available, pcm->xxx.slots and pcm->xxx.rslots[] members * are reduced and might be zero. */ int snd_ac97_pcm_assign(struct snd_ac97_bus *bus, unsigned short pcms_count, const struct ac97_pcm *pcms) { int i, j, k; const struct ac97_pcm *pcm; struct ac97_pcm *rpcms, *rpcm; unsigned short avail_slots[2][4]; unsigned char rate_table[2][4]; unsigned short tmp, slots; unsigned short spdif_slots[4]; unsigned int rates; struct snd_ac97 *codec; rpcms = kcalloc(pcms_count, sizeof(struct ac97_pcm), GFP_KERNEL); if (rpcms == NULL) return -ENOMEM; memset(avail_slots, 0, sizeof(avail_slots)); memset(rate_table, 0, sizeof(rate_table)); memset(spdif_slots, 0, sizeof(spdif_slots)); for (i = 0; i < 4; i++) { codec = bus->codec[i]; if (!codec) continue; avail_slots[0][i] = get_pslots(codec, &rate_table[0][i], &spdif_slots[i]); avail_slots[1][i] = get_cslots(codec); if (!(codec->scaps & AC97_SCAP_INDEP_SDIN)) { for (j = 0; j < i; j++) { if (bus->codec[j]) avail_slots[1][i] &= ~avail_slots[1][j]; } } } /* first step - exclusive devices */ for (i = 0; i < pcms_count; i++) { pcm = &pcms[i]; rpcm = &rpcms[i]; /* low-level driver thinks that it's more clever */ if (pcm->copy_flag) { *rpcm = *pcm; continue; } rpcm->stream = pcm->stream; rpcm->exclusive = pcm->exclusive; rpcm->spdif = pcm->spdif; rpcm->private_value = pcm->private_value; rpcm->bus 
= bus; rpcm->rates = ~0; slots = pcm->r[0].slots; for (j = 0; j < 4 && slots; j++) { if (!bus->codec[j]) continue; rates = ~0; if (pcm->spdif && pcm->stream == 0) tmp = spdif_slots[j]; else tmp = avail_slots[pcm->stream][j]; if (pcm->exclusive) { /* exclusive access */ tmp &= slots; for (k = 0; k < i; k++) { if (rpcm->stream == rpcms[k].stream) tmp &= ~rpcms[k].r[0].rslots[j]; } } else { /* non-exclusive access */ tmp &= pcm->r[0].slots; } if (tmp) { rpcm->r[0].rslots[j] = tmp; rpcm->r[0].codec[j] = bus->codec[j]; rpcm->r[0].rate_table[j] = rate_table[pcm->stream][j]; if (bus->no_vra) rates = SNDRV_PCM_RATE_48000; else rates = get_rates(rpcm, j, tmp, 0); if (pcm->exclusive) avail_slots[pcm->stream][j] &= ~tmp; } slots &= ~tmp; rpcm->r[0].slots |= tmp; rpcm->rates &= rates; } /* for double rate, we check the first codec only */ if (pcm->stream == SNDRV_PCM_STREAM_PLAYBACK && bus->codec[0] && (bus->codec[0]->flags & AC97_DOUBLE_RATE) && rate_table[pcm->stream][0] == 0) { tmp = (1<<AC97_SLOT_PCM_LEFT) | (1<<AC97_SLOT_PCM_RIGHT) | (1<<AC97_SLOT_PCM_LEFT_0) | (1<<AC97_SLOT_PCM_RIGHT_0); if ((tmp & pcm->r[1].slots) == tmp) { rpcm->r[1].slots = tmp; rpcm->r[1].rslots[0] = tmp; rpcm->r[1].rate_table[0] = 0; rpcm->r[1].codec[0] = bus->codec[0]; if (pcm->exclusive) avail_slots[pcm->stream][0] &= ~tmp; if (bus->no_vra) rates = SNDRV_PCM_RATE_96000; else rates = get_rates(rpcm, 0, tmp, 1); rpcm->rates |= rates; } } if (rpcm->rates == ~0) rpcm->rates = 0; /* not used */ } bus->pcms_count = pcms_count; bus->pcms = rpcms; return 0; } EXPORT_SYMBOL(snd_ac97_pcm_assign); /** * snd_ac97_pcm_open - opens the given AC97 pcm * @pcm: the ac97 pcm instance * @rate: rate in Hz, if codec does not support VRA, this value must be 48000Hz * @cfg: output stream characteristics * @slots: a subset of allocated slots (snd_ac97_pcm_assign) for this pcm * * It locks the specified slots and sets the given rate to AC97 registers. 
*/ int snd_ac97_pcm_open(struct ac97_pcm *pcm, unsigned int rate, enum ac97_pcm_cfg cfg, unsigned short slots) { struct snd_ac97_bus *bus; int i, cidx, r, ok_flag; unsigned int reg_ok[4] = {0,0,0,0}; unsigned char reg; int err = 0; r = rate > 48000; bus = pcm->bus; if (cfg == AC97_PCM_CFG_SPDIF) { for (cidx = 0; cidx < 4; cidx++) if (bus->codec[cidx] && (bus->codec[cidx]->ext_id & AC97_EI_SPDIF)) { err = set_spdif_rate(bus->codec[cidx], rate); if (err < 0) return err; } } spin_lock_irq(&pcm->bus->bus_lock); for (i = 3; i < 12; i++) { if (!(slots & (1 << i))) continue; ok_flag = 0; for (cidx = 0; cidx < 4; cidx++) { if (bus->used_slots[pcm->stream][cidx] & (1 << i)) { spin_unlock_irq(&pcm->bus->bus_lock); err = -EBUSY; goto error; } if (pcm->r[r].rslots[cidx] & (1 << i)) { bus->used_slots[pcm->stream][cidx] |= (1 << i); ok_flag++; } } if (!ok_flag) { spin_unlock_irq(&pcm->bus->bus_lock); snd_printk(KERN_ERR "cannot find configuration for AC97 slot %i\n", i); err = -EAGAIN; goto error; } } pcm->cur_dbl = r; spin_unlock_irq(&pcm->bus->bus_lock); for (i = 3; i < 12; i++) { if (!(slots & (1 << i))) continue; for (cidx = 0; cidx < 4; cidx++) { if (pcm->r[r].rslots[cidx] & (1 << i)) { reg = get_slot_reg(pcm, cidx, i, r); if (reg == 0xff) { snd_printk(KERN_ERR "invalid AC97 slot %i?\n", i); continue; } if (reg_ok[cidx] & (1 << (reg - AC97_PCM_FRONT_DAC_RATE))) continue; //printk(KERN_DEBUG "setting ac97 reg 0x%x to rate %d\n", reg, rate); err = snd_ac97_set_rate(pcm->r[r].codec[cidx], reg, rate); if (err < 0) snd_printk(KERN_ERR "error in snd_ac97_set_rate: cidx=%d, reg=0x%x, rate=%d, err=%d\n", cidx, reg, rate, err); else reg_ok[cidx] |= (1 << (reg - AC97_PCM_FRONT_DAC_RATE)); } } } pcm->aslots = slots; return 0; error: pcm->aslots = slots; snd_ac97_pcm_close(pcm); return err; } EXPORT_SYMBOL(snd_ac97_pcm_open); /** * snd_ac97_pcm_close - closes the given AC97 pcm * @pcm: the ac97 pcm instance * * It frees the locked AC97 slots. 
 */
int snd_ac97_pcm_close(struct ac97_pcm *pcm)
{
	struct snd_ac97_bus *bus;
	unsigned short slots = pcm->aslots;
	int i, cidx;

#ifdef CONFIG_SND_AC97_POWER_SAVE
	/*
	 * Release the power-up reference that snd_ac97_set_rate() took
	 * (snd_ac97_update_power(..., 1)) for each slot rate register.
	 */
	int r = pcm->cur_dbl;
	for (i = 3; i < 12; i++) {
		if (!(slots & (1 << i)))
			continue;
		for (cidx = 0; cidx < 4; cidx++) {
			if (pcm->r[r].rslots[cidx] & (1 << i)) {
				int reg = get_slot_reg(pcm, cidx, i, r);
				snd_ac97_update_power(pcm->r[r].codec[cidx],
						      reg, 0);
			}
		}
	}
#endif
	bus = pcm->bus;
	spin_lock_irq(&pcm->bus->bus_lock);
	/* Mark every slot this pcm had allocated as free again on all codecs. */
	for (i = 3; i < 12; i++) {
		if (!(slots & (1 << i)))
			continue;
		for (cidx = 0; cidx < 4; cidx++)
			bus->used_slots[pcm->stream][cidx] &= ~(1 << i);
	}
	pcm->aslots = 0;
	pcm->cur_dbl = 0;
	spin_unlock_irq(&pcm->bus->bus_lock);
	return 0;
}

EXPORT_SYMBOL(snd_ac97_pcm_close);

/*
 * hw_params rule: when more than two channels are requested, restrict the
 * rate interval to the single-rate range (<= 48000 Hz).
 */
static int double_rate_hw_constraint_rate(struct snd_pcm_hw_params *params,
					  struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *channels = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_CHANNELS);
	if (channels->min > 2) {
		static const struct snd_interval single_rates = {
			.min = 1,
			.max = 48000,
		};
		struct snd_interval *rate = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_RATE);
		return snd_interval_refine(rate, &single_rates);
	}
	return 0;
}

/*
 * hw_params rule: when a double rate (> 48000 Hz) is requested, restrict
 * the channel count to exactly two.
 */
static int double_rate_hw_constraint_channels(struct snd_pcm_hw_params *params,
					      struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *rate = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_RATE);
	if (rate->min > 48000) {
		static const struct snd_interval double_rate_channels = {
			.min = 2,
			.max = 2,
		};
		struct snd_interval *channels = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_CHANNELS);
		return snd_interval_refine(channels, &double_rate_channels);
	}
	return 0;
}

/**
 * snd_ac97_pcm_double_rate_rules - set double rate constraints
 * @runtime: the runtime of the ac97 front playback pcm
 *
 * Installs the hardware constraint rules to prevent using double rates and
 * more than two channels at the same time.
 *
 * Returns zero if successful, or a negative error code on failure.
 */
int snd_ac97_pcm_double_rate_rules(struct snd_pcm_runtime *runtime)
{
	int err;

	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				  double_rate_hw_constraint_rate, NULL,
				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
	if (err < 0)
		return err;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
				  double_rate_hw_constraint_channels, NULL,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	return err;
}

EXPORT_SYMBOL(snd_ac97_pcm_double_rate_rules);
gpl-2.0
SerenityS/android_kernel_lge_msm8974
drivers/usb/host/uhci-grlib.c
5649
5551
/* * UHCI HCD (Host Controller Driver) for GRLIB GRUSBHC * * Copyright (c) 2011 Jan Andersson <jan@gaisler.com> * * This file is based on UHCI PCI HCD: * (C) Copyright 1999 Linus Torvalds * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com * (C) Copyright 1999 Randy Dunlap * (C) Copyright 1999 Georg Acher, acher@in.tum.de * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu */ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_platform.h> static int uhci_grlib_init(struct usb_hcd *hcd) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); /* * Probe to determine the endianness of the controller. * We know that bit 7 of the PORTSC1 register is always set * and bit 15 is always clear. If uhci_readw() yields a value * with bit 7 (0x80) turned on then the current little-endian * setting is correct. Otherwise we assume the value was * byte-swapped; hence the register interface and presumably * also the descriptors are big-endian. */ if (!(uhci_readw(uhci, USBPORTSC1) & 0x80)) { uhci->big_endian_mmio = 1; uhci->big_endian_desc = 1; } uhci->rh_numports = uhci_count_ports(hcd); /* Set up pointers to to generic functions */ uhci->reset_hc = uhci_generic_reset_hc; uhci->check_and_reset_hc = uhci_generic_check_and_reset_hc; /* No special actions need to be taken for the functions below */ uhci->configure_hc = NULL; uhci->resume_detect_interrupts_are_broken = NULL; uhci->global_suspend_mode_is_broken = NULL; /* Reset if the controller isn't already safely quiescent. 
*/ check_and_reset_hc(uhci); return 0; } static const struct hc_driver uhci_grlib_hc_driver = { .description = hcd_name, .product_desc = "GRLIB GRUSBHC UHCI Host Controller", .hcd_priv_size = sizeof(struct uhci_hcd), /* Generic hardware linkage */ .irq = uhci_irq, .flags = HCD_MEMORY | HCD_USB11, /* Basic lifecycle operations */ .reset = uhci_grlib_init, .start = uhci_start, #ifdef CONFIG_PM .pci_suspend = NULL, .pci_resume = NULL, .bus_suspend = uhci_rh_suspend, .bus_resume = uhci_rh_resume, #endif .stop = uhci_stop, .urb_enqueue = uhci_urb_enqueue, .urb_dequeue = uhci_urb_dequeue, .endpoint_disable = uhci_hcd_endpoint_disable, .get_frame_number = uhci_hcd_get_frame_number, .hub_status_data = uhci_hub_status_data, .hub_control = uhci_hub_control, }; static int __devinit uhci_hcd_grlib_probe(struct platform_device *op) { struct device_node *dn = op->dev.of_node; struct usb_hcd *hcd; struct uhci_hcd *uhci = NULL; struct resource res; int irq; int rv; if (usb_disabled()) return -ENODEV; dev_dbg(&op->dev, "initializing GRUSBHC UHCI USB Controller\n"); rv = of_address_to_resource(dn, 0, &res); if (rv) return rv; /* usb_create_hcd requires dma_mask != NULL */ op->dev.dma_mask = &op->dev.coherent_dma_mask; hcd = usb_create_hcd(&uhci_grlib_hc_driver, &op->dev, "GRUSBHC UHCI USB"); if (!hcd) return -ENOMEM; hcd->rsrc_start = res.start; hcd->rsrc_len = resource_size(&res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); rv = -EBUSY; goto err_rmr; } irq = irq_of_parse_and_map(dn, 0); if (irq == NO_IRQ) { printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { printk(KERN_ERR "%s: ioremap failed\n", __FILE__); rv = -ENOMEM; goto err_ioremap; } uhci = hcd_to_uhci(hcd); uhci->regs = hcd->regs; rv = usb_add_hcd(hcd, irq, 0); if (rv) goto err_uhci; return 0; err_uhci: iounmap(hcd->regs); 
/* Error unwind for uhci_hcd_grlib_probe(): reverse order of acquisition. */
err_ioremap:
	irq_dispose_mapping(irq);

err_irq:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

err_rmr:
	usb_put_hcd(hcd);

	return rv;
}

/* Tear down everything uhci_hcd_grlib_probe() set up, in reverse order. */
static int uhci_hcd_grlib_remove(struct platform_device *op)
{
	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);

	dev_set_drvdata(&op->dev, NULL);

	dev_dbg(&op->dev, "stopping GRLIB GRUSBHC UHCI USB Controller\n");

	usb_remove_hcd(hcd);

	iounmap(hcd->regs);
	irq_dispose_mapping(hcd->irq);
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

	usb_put_hcd(hcd);

	return 0;
}

/* Make sure the controller is quiescent and that we're not using it
 * any more.  This is mainly for the benefit of programs which, like kexec,
 * expect the hardware to be idle: not doing DMA or generating IRQs.
 *
 * This routine may be called in a damaged or failing kernel.  Hence we
 * do not acquire the spinlock before shutting down the controller.
 */
static void uhci_hcd_grlib_shutdown(struct platform_device *op)
{
	struct usb_hcd *hcd = dev_get_drvdata(&op->dev);

	uhci_hc_died(hcd_to_uhci(hcd));
}

/* Device-tree match entries for this controller. */
static const struct of_device_id uhci_hcd_grlib_of_match[] = {
	{ .name = "GAISLER_UHCI", },
	{ .name = "01_027", },
	{},
};
MODULE_DEVICE_TABLE(of, uhci_hcd_grlib_of_match);

static struct platform_driver uhci_grlib_driver = {
	.probe		= uhci_hcd_grlib_probe,
	.remove		= uhci_hcd_grlib_remove,
	.shutdown	= uhci_hcd_grlib_shutdown,
	.driver = {
		.name = "grlib-uhci",
		.owner = THIS_MODULE,
		.of_match_table = uhci_hcd_grlib_of_match,
	},
};
gpl-2.0
bilalliberty/android_kernel_htc_ville-liberty
tools/power/cpupower/bench/parse.c
8465
5397
/* cpufreq-bench CPUFreq microbenchmark * * Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <time.h> #include <dirent.h> #include <sys/utsname.h> #include <sys/types.h> #include <sys/stat.h> #include "parse.h" #include "config.h" /** * converts priority string to priority * * @param str string that represents a scheduler priority * * @retval priority * @retval SCHED_ERR when the priority doesn't exit **/ enum sched_prio string_to_prio(const char *str) { if (strncasecmp("high", str, strlen(str)) == 0) return SCHED_HIGH; else if (strncasecmp("default", str, strlen(str)) == 0) return SCHED_DEFAULT; else if (strncasecmp("low", str, strlen(str)) == 0) return SCHED_LOW; else return SCHED_ERR; } /** * create and open logfile * * @param dir directory in which the logfile should be created * * @retval logfile on success * @retval NULL when the file can't be created **/ FILE *prepare_output(const char *dirname) { FILE *output = NULL; int len; char *filename; struct utsname sysdata; DIR *dir; dir = opendir(dirname); if (dir == NULL) { if (mkdir(dirname, 0755)) { perror("mkdir"); fprintf(stderr, "error: Cannot create dir %s\n", dirname); return NULL; } } len = strlen(dirname) + 30; 
filename = malloc(sizeof(char) * len); if (uname(&sysdata) == 0) { len += strlen(sysdata.nodename) + strlen(sysdata.release); filename = realloc(filename, sizeof(char) * len); if (filename == NULL) { perror("realloc"); return NULL; } snprintf(filename, len - 1, "%s/benchmark_%s_%s_%li.log", dirname, sysdata.nodename, sysdata.release, time(NULL)); } else { snprintf(filename, len - 1, "%s/benchmark_%li.log", dirname, time(NULL)); } dprintf("logilename: %s\n", filename); output = fopen(filename, "w+"); if (output == NULL) { perror("fopen"); fprintf(stderr, "error: unable to open logfile\n"); } fprintf(stdout, "Logfile: %s\n", filename); free(filename); fprintf(output, "#round load sleep performance powersave percentage\n"); return output; } /** * returns the default config * * @retval default config on success * @retval NULL when the output file can't be created **/ struct config *prepare_default_config() { struct config *config = malloc(sizeof(struct config)); dprintf("loading defaults\n"); config->sleep = 500000; config->load = 500000; config->sleep_step = 500000; config->load_step = 500000; config->cycles = 5; config->rounds = 50; config->cpu = 0; config->prio = SCHED_HIGH; config->verbose = 0; strncpy(config->governor, "ondemand", 8); config->output = stdout; #ifdef DEFAULT_CONFIG_FILE if (prepare_config(DEFAULT_CONFIG_FILE, config)) return NULL; #endif return config; } /** * parses config file and returns the config to the caller * * @param path config file name * * @retval 1 on error * @retval 0 on success **/ int prepare_config(const char *path, struct config *config) { size_t len = 0; char *opt, *val, *line = NULL; FILE *configfile = fopen(path, "r"); if (config == NULL) { fprintf(stderr, "error: config is NULL\n"); return 1; } if (configfile == NULL) { perror("fopen"); fprintf(stderr, "error: unable to read configfile\n"); free(config); return 1; } while (getline(&line, &len, configfile) != -1) { if (line[0] == '#' || line[0] == ' ') continue; sscanf(line, 
"%as = %as", &opt, &val); dprintf("parsing: %s -> %s\n", opt, val); if (strncmp("sleep", opt, strlen(opt)) == 0) sscanf(val, "%li", &config->sleep); else if (strncmp("load", opt, strlen(opt)) == 0) sscanf(val, "%li", &config->load); else if (strncmp("load_step", opt, strlen(opt)) == 0) sscanf(val, "%li", &config->load_step); else if (strncmp("sleep_step", opt, strlen(opt)) == 0) sscanf(val, "%li", &config->sleep_step); else if (strncmp("cycles", opt, strlen(opt)) == 0) sscanf(val, "%u", &config->cycles); else if (strncmp("rounds", opt, strlen(opt)) == 0) sscanf(val, "%u", &config->rounds); else if (strncmp("verbose", opt, strlen(opt)) == 0) sscanf(val, "%u", &config->verbose); else if (strncmp("output", opt, strlen(opt)) == 0) config->output = prepare_output(val); else if (strncmp("cpu", opt, strlen(opt)) == 0) sscanf(val, "%u", &config->cpu); else if (strncmp("governor", opt, 14) == 0) strncpy(config->governor, val, 14); else if (strncmp("priority", opt, strlen(opt)) == 0) { if (string_to_prio(val) != SCHED_ERR) config->prio = string_to_prio(val); } } free(line); free(opt); free(val); return 0; }
gpl-2.0
viaembedded/vab820-kernel-bsp
arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c
9489
43141
/* * SH7785 Pinmux * * Copyright (C) 2008 Magnus Damm * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/gpio.h> #include <cpu/sh7785.h> enum { PINMUX_RESERVED = 0, PINMUX_DATA_BEGIN, PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA, PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA, PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA, PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA, PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA, PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA, PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA, PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA, PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA, PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA, PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA, PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA, PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA, PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA, PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA, PM1_DATA, PM0_DATA, PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA, PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA, PP5_DATA, PP4_DATA, PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA, PQ4_DATA, PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA, PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA, PINMUX_DATA_END, PINMUX_INPUT_BEGIN, PA7_IN, PA6_IN, PA5_IN, PA4_IN, PA3_IN, PA2_IN, PA1_IN, PA0_IN, PB7_IN, PB6_IN, PB5_IN, PB4_IN, PB3_IN, PB2_IN, PB1_IN, PB0_IN, PC7_IN, PC6_IN, PC5_IN, PC4_IN, PC3_IN, PC2_IN, PC1_IN, PC0_IN, PD7_IN, PD6_IN, PD5_IN, PD4_IN, PD3_IN, PD2_IN, PD1_IN, PD0_IN, PE5_IN, PE4_IN, PE3_IN, PE2_IN, PE1_IN, PE0_IN, PF7_IN, PF6_IN, PF5_IN, PF4_IN, PF3_IN, PF2_IN, PF1_IN, PF0_IN, PG7_IN, PG6_IN, PG5_IN, PG4_IN, PG3_IN, PG2_IN, PG1_IN, PG0_IN, PH7_IN, PH6_IN, PH5_IN, PH4_IN, PH3_IN, PH2_IN, 
PH1_IN, PH0_IN, PJ7_IN, PJ6_IN, PJ5_IN, PJ4_IN, PJ3_IN, PJ2_IN, PJ1_IN, PJ0_IN, PK7_IN, PK6_IN, PK5_IN, PK4_IN, PK3_IN, PK2_IN, PK1_IN, PK0_IN, PL7_IN, PL6_IN, PL5_IN, PL4_IN, PL3_IN, PL2_IN, PL1_IN, PL0_IN, PM1_IN, PM0_IN, PN7_IN, PN6_IN, PN5_IN, PN4_IN, PN3_IN, PN2_IN, PN1_IN, PN0_IN, PP5_IN, PP4_IN, PP3_IN, PP2_IN, PP1_IN, PP0_IN, PQ4_IN, PQ3_IN, PQ2_IN, PQ1_IN, PQ0_IN, PR3_IN, PR2_IN, PR1_IN, PR0_IN, PINMUX_INPUT_END, PINMUX_INPUT_PULLUP_BEGIN, PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU, PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU, PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU, PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU, PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU, PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU, PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU, PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU, PE5_IN_PU, PE4_IN_PU, PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU, PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU, PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU, PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU, PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU, PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU, PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU, PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU, PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU, PJ0_IN_PU, PK7_IN_PU, PK6_IN_PU, PK5_IN_PU, PK4_IN_PU, PK3_IN_PU, PK2_IN_PU, PK1_IN_PU, PK0_IN_PU, PL7_IN_PU, PL6_IN_PU, PL5_IN_PU, PL4_IN_PU, PL3_IN_PU, PL2_IN_PU, PL1_IN_PU, PL0_IN_PU, PM1_IN_PU, PM0_IN_PU, PN7_IN_PU, PN6_IN_PU, PN5_IN_PU, PN4_IN_PU, PN3_IN_PU, PN2_IN_PU, PN1_IN_PU, PN0_IN_PU, PP5_IN_PU, PP4_IN_PU, PP3_IN_PU, PP2_IN_PU, PP1_IN_PU, PP0_IN_PU, PQ4_IN_PU, PQ3_IN_PU, PQ2_IN_PU, PQ1_IN_PU, PQ0_IN_PU, PR3_IN_PU, PR2_IN_PU, PR1_IN_PU, PR0_IN_PU, PINMUX_INPUT_PULLUP_END, PINMUX_OUTPUT_BEGIN, PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT, PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT, PB7_OUT, PB6_OUT, PB5_OUT, PB4_OUT, PB3_OUT, PB2_OUT, PB1_OUT, PB0_OUT, PC7_OUT, PC6_OUT, PC5_OUT, PC4_OUT, PC3_OUT, PC2_OUT, PC1_OUT, PC0_OUT, PD7_OUT, PD6_OUT, PD5_OUT, PD4_OUT, PD3_OUT, PD2_OUT, PD1_OUT, 
PD0_OUT, PE5_OUT, PE4_OUT, PE3_OUT, PE2_OUT, PE1_OUT, PE0_OUT, PF7_OUT, PF6_OUT, PF5_OUT, PF4_OUT, PF3_OUT, PF2_OUT, PF1_OUT, PF0_OUT, PG7_OUT, PG6_OUT, PG5_OUT, PG4_OUT, PG3_OUT, PG2_OUT, PG1_OUT, PG0_OUT, PH7_OUT, PH6_OUT, PH5_OUT, PH4_OUT, PH3_OUT, PH2_OUT, PH1_OUT, PH0_OUT, PJ7_OUT, PJ6_OUT, PJ5_OUT, PJ4_OUT, PJ3_OUT, PJ2_OUT, PJ1_OUT, PJ0_OUT, PK7_OUT, PK6_OUT, PK5_OUT, PK4_OUT, PK3_OUT, PK2_OUT, PK1_OUT, PK0_OUT, PL7_OUT, PL6_OUT, PL5_OUT, PL4_OUT, PL3_OUT, PL2_OUT, PL1_OUT, PL0_OUT, PM1_OUT, PM0_OUT, PN7_OUT, PN6_OUT, PN5_OUT, PN4_OUT, PN3_OUT, PN2_OUT, PN1_OUT, PN0_OUT, PP5_OUT, PP4_OUT, PP3_OUT, PP2_OUT, PP1_OUT, PP0_OUT, PQ4_OUT, PQ3_OUT, PQ2_OUT, PQ1_OUT, PQ0_OUT, PR3_OUT, PR2_OUT, PR1_OUT, PR0_OUT, PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PA7_FN, PA6_FN, PA5_FN, PA4_FN, PA3_FN, PA2_FN, PA1_FN, PA0_FN, PB7_FN, PB6_FN, PB5_FN, PB4_FN, PB3_FN, PB2_FN, PB1_FN, PB0_FN, PC7_FN, PC6_FN, PC5_FN, PC4_FN, PC3_FN, PC2_FN, PC1_FN, PC0_FN, PD7_FN, PD6_FN, PD5_FN, PD4_FN, PD3_FN, PD2_FN, PD1_FN, PD0_FN, PE5_FN, PE4_FN, PE3_FN, PE2_FN, PE1_FN, PE0_FN, PF7_FN, PF6_FN, PF5_FN, PF4_FN, PF3_FN, PF2_FN, PF1_FN, PF0_FN, PG7_FN, PG6_FN, PG5_FN, PG4_FN, PG3_FN, PG2_FN, PG1_FN, PG0_FN, PH7_FN, PH6_FN, PH5_FN, PH4_FN, PH3_FN, PH2_FN, PH1_FN, PH0_FN, PJ7_FN, PJ6_FN, PJ5_FN, PJ4_FN, PJ3_FN, PJ2_FN, PJ1_FN, PJ0_FN, PK7_FN, PK6_FN, PK5_FN, PK4_FN, PK3_FN, PK2_FN, PK1_FN, PK0_FN, PL7_FN, PL6_FN, PL5_FN, PL4_FN, PL3_FN, PL2_FN, PL1_FN, PL0_FN, PM1_FN, PM0_FN, PN7_FN, PN6_FN, PN5_FN, PN4_FN, PN3_FN, PN2_FN, PN1_FN, PN0_FN, PP5_FN, PP4_FN, PP3_FN, PP2_FN, PP1_FN, PP0_FN, PQ4_FN, PQ3_FN, PQ2_FN, PQ1_FN, PQ0_FN, PR3_FN, PR2_FN, PR1_FN, PR0_FN, P1MSEL15_0, P1MSEL15_1, P1MSEL14_0, P1MSEL14_1, P1MSEL13_0, P1MSEL13_1, P1MSEL12_0, P1MSEL12_1, P1MSEL11_0, P1MSEL11_1, P1MSEL10_0, P1MSEL10_1, P1MSEL9_0, P1MSEL9_1, P1MSEL8_0, P1MSEL8_1, P1MSEL7_0, P1MSEL7_1, P1MSEL6_0, P1MSEL6_1, P1MSEL5_0, P1MSEL4_0, P1MSEL4_1, P1MSEL3_0, P1MSEL3_1, P1MSEL2_0, P1MSEL2_1, P1MSEL1_0, P1MSEL1_1, P1MSEL0_0, 
P1MSEL0_1, P2MSEL2_0, P2MSEL2_1, P2MSEL1_0, P2MSEL1_1, P2MSEL0_0, P2MSEL0_1, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, D63_AD31_MARK, D62_AD30_MARK, D61_AD29_MARK, D60_AD28_MARK, D59_AD27_MARK, D58_AD26_MARK, D57_AD25_MARK, D56_AD24_MARK, D55_AD23_MARK, D54_AD22_MARK, D53_AD21_MARK, D52_AD20_MARK, D51_AD19_MARK, D50_AD18_MARK, D49_AD17_DB5_MARK, D48_AD16_DB4_MARK, D47_AD15_DB3_MARK, D46_AD14_DB2_MARK, D45_AD13_DB1_MARK, D44_AD12_DB0_MARK, D43_AD11_DG5_MARK, D42_AD10_DG4_MARK, D41_AD9_DG3_MARK, D40_AD8_DG2_MARK, D39_AD7_DG1_MARK, D38_AD6_DG0_MARK, D37_AD5_DR5_MARK, D36_AD4_DR4_MARK, D35_AD3_DR3_MARK, D34_AD2_DR2_MARK, D33_AD1_DR1_MARK, D32_AD0_DR0_MARK, REQ1_MARK, REQ2_MARK, REQ3_MARK, GNT1_MARK, GNT2_MARK, GNT3_MARK, MMCCLK_MARK, D31_MARK, D30_MARK, D29_MARK, D28_MARK, D27_MARK, D26_MARK, D25_MARK, D24_MARK, D23_MARK, D22_MARK, D21_MARK, D20_MARK, D19_MARK, D18_MARK, D17_MARK, D16_MARK, SCIF1_SCK_MARK, SCIF1_RXD_MARK, SCIF1_TXD_MARK, SCIF0_CTS_MARK, INTD_MARK, FCE_MARK, SCIF0_RTS_MARK, HSPI_CS_MARK, FSE_MARK, SCIF0_SCK_MARK, HSPI_CLK_MARK, FRE_MARK, SCIF0_RXD_MARK, HSPI_RX_MARK, FRB_MARK, SCIF0_TXD_MARK, HSPI_TX_MARK, FWE_MARK, SCIF5_TXD_MARK, HAC1_SYNC_MARK, SSI1_WS_MARK, SIOF_TXD_PJ_MARK, HAC0_SDOUT_MARK, SSI0_SDATA_MARK, SIOF_RXD_PJ_MARK, HAC0_SDIN_MARK, SSI0_SCK_MARK, SIOF_SYNC_PJ_MARK, HAC0_SYNC_MARK, SSI0_WS_MARK, SIOF_MCLK_PJ_MARK, HAC_RES_MARK, SIOF_SCK_PJ_MARK, HAC0_BITCLK_MARK, SSI0_CLK_MARK, HAC1_BITCLK_MARK, SSI1_CLK_MARK, TCLK_MARK, IOIS16_MARK, STATUS0_MARK, DRAK0_PK3_MARK, STATUS1_MARK, DRAK1_PK2_MARK, DACK2_MARK, SCIF2_TXD_MARK, MMCCMD_MARK, SIOF_TXD_PK_MARK, DACK3_MARK, SCIF2_SCK_MARK, MMCDAT_MARK, SIOF_SCK_PK_MARK, DREQ0_MARK, DREQ1_MARK, DRAK0_PK1_MARK, DRAK1_PK0_MARK, DREQ2_MARK, INTB_MARK, DREQ3_MARK, INTC_MARK, DRAK2_MARK, CE2A_MARK, IRL4_MARK, FD4_MARK, IRL5_MARK, FD5_MARK, IRL6_MARK, FD6_MARK, IRL7_MARK, FD7_MARK, DRAK3_MARK, CE2B_MARK, BREQ_BSACK_MARK, BACK_BSREQ_MARK, SCIF5_RXD_MARK, HAC1_SDIN_MARK, SSI1_SCK_MARK, SCIF5_SCK_MARK, 
HAC1_SDOUT_MARK, SSI1_SDATA_MARK, SCIF3_TXD_MARK, FCLE_MARK, SCIF3_RXD_MARK, FALE_MARK, SCIF3_SCK_MARK, FD0_MARK, SCIF4_TXD_MARK, FD1_MARK, SCIF4_RXD_MARK, FD2_MARK, SCIF4_SCK_MARK, FD3_MARK, DEVSEL_DCLKOUT_MARK, STOP_CDE_MARK, LOCK_ODDF_MARK, TRDY_DISPL_MARK, IRDY_HSYNC_MARK, PCIFRAME_VSYNC_MARK, INTA_MARK, GNT0_GNTIN_MARK, REQ0_REQOUT_MARK, PERR_MARK, SERR_MARK, WE7_CBE3_MARK, WE6_CBE2_MARK, WE5_CBE1_MARK, WE4_CBE0_MARK, SCIF2_RXD_MARK, SIOF_RXD_MARK, MRESETOUT_MARK, IRQOUT_MARK, PINMUX_MARK_END, }; static pinmux_enum_t pinmux_data[] = { /* PA GPIO */ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU), PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU), PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU), PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU), PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU), PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU), PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU), PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU), /* PB GPIO */ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU), PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU), PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU), PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU), PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU), PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU), PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU), PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU), /* PC GPIO */ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU), PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU), PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU), PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU), PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU), PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU), PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU), PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU), /* PD GPIO */ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU), PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU), PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU), 
PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU), PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU), PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU), PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU), PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU), /* PE GPIO */ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU), PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU), PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU), PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU), PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU), PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU), /* PF GPIO */ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU), PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU), PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU), PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU), PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU), PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU), PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU), PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU), /* PG GPIO */ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU), PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU), PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU), PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU), PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU), PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU), PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU), PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU), /* PH GPIO */ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU), PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU), PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU), PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU), PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU), PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU), PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU), PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU), /* PJ GPIO */ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU), PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU), PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, 
PJ5_IN_PU), PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU), PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU), PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU), PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU), PINMUX_DATA(PJ0_DATA, PJ0_IN, PJ0_OUT, PJ0_IN_PU), /* PK GPIO */ PINMUX_DATA(PK7_DATA, PK7_IN, PK7_OUT, PK7_IN_PU), PINMUX_DATA(PK6_DATA, PK6_IN, PK6_OUT, PK6_IN_PU), PINMUX_DATA(PK5_DATA, PK5_IN, PK5_OUT, PK5_IN_PU), PINMUX_DATA(PK4_DATA, PK4_IN, PK4_OUT, PK4_IN_PU), PINMUX_DATA(PK3_DATA, PK3_IN, PK3_OUT, PK3_IN_PU), PINMUX_DATA(PK2_DATA, PK2_IN, PK2_OUT, PK2_IN_PU), PINMUX_DATA(PK1_DATA, PK1_IN, PK1_OUT, PK1_IN_PU), PINMUX_DATA(PK0_DATA, PK0_IN, PK0_OUT, PK0_IN_PU), /* PL GPIO */ PINMUX_DATA(PL7_DATA, PL7_IN, PL7_OUT, PL7_IN_PU), PINMUX_DATA(PL6_DATA, PL6_IN, PL6_OUT, PL6_IN_PU), PINMUX_DATA(PL5_DATA, PL5_IN, PL5_OUT, PL5_IN_PU), PINMUX_DATA(PL4_DATA, PL4_IN, PL4_OUT, PL4_IN_PU), PINMUX_DATA(PL3_DATA, PL3_IN, PL3_OUT, PL3_IN_PU), PINMUX_DATA(PL2_DATA, PL2_IN, PL2_OUT, PL2_IN_PU), PINMUX_DATA(PL1_DATA, PL1_IN, PL1_OUT, PL1_IN_PU), PINMUX_DATA(PL0_DATA, PL0_IN, PL0_OUT, PL0_IN_PU), /* PM GPIO */ PINMUX_DATA(PM1_DATA, PM1_IN, PM1_OUT, PM1_IN_PU), PINMUX_DATA(PM0_DATA, PM0_IN, PM0_OUT, PM0_IN_PU), /* PN GPIO */ PINMUX_DATA(PN7_DATA, PN7_IN, PN7_OUT, PN7_IN_PU), PINMUX_DATA(PN6_DATA, PN6_IN, PN6_OUT, PN6_IN_PU), PINMUX_DATA(PN5_DATA, PN5_IN, PN5_OUT, PN5_IN_PU), PINMUX_DATA(PN4_DATA, PN4_IN, PN4_OUT, PN4_IN_PU), PINMUX_DATA(PN3_DATA, PN3_IN, PN3_OUT, PN3_IN_PU), PINMUX_DATA(PN2_DATA, PN2_IN, PN2_OUT, PN2_IN_PU), PINMUX_DATA(PN1_DATA, PN1_IN, PN1_OUT, PN1_IN_PU), PINMUX_DATA(PN0_DATA, PN0_IN, PN0_OUT, PN0_IN_PU), /* PP GPIO */ PINMUX_DATA(PP5_DATA, PP5_IN, PP5_OUT, PP5_IN_PU), PINMUX_DATA(PP4_DATA, PP4_IN, PP4_OUT, PP4_IN_PU), PINMUX_DATA(PP3_DATA, PP3_IN, PP3_OUT, PP3_IN_PU), PINMUX_DATA(PP2_DATA, PP2_IN, PP2_OUT, PP2_IN_PU), PINMUX_DATA(PP1_DATA, PP1_IN, PP1_OUT, PP1_IN_PU), PINMUX_DATA(PP0_DATA, PP0_IN, PP0_OUT, PP0_IN_PU), /* PQ GPIO */ 
PINMUX_DATA(PQ4_DATA, PQ4_IN, PQ4_OUT, PQ4_IN_PU), PINMUX_DATA(PQ3_DATA, PQ3_IN, PQ3_OUT, PQ3_IN_PU), PINMUX_DATA(PQ2_DATA, PQ2_IN, PQ2_OUT, PQ2_IN_PU), PINMUX_DATA(PQ1_DATA, PQ1_IN, PQ1_OUT, PQ1_IN_PU), PINMUX_DATA(PQ0_DATA, PQ0_IN, PQ0_OUT, PQ0_IN_PU), /* PR GPIO */ PINMUX_DATA(PR3_DATA, PR3_IN, PR3_OUT, PR3_IN_PU), PINMUX_DATA(PR2_DATA, PR2_IN, PR2_OUT, PR2_IN_PU), PINMUX_DATA(PR1_DATA, PR1_IN, PR1_OUT, PR1_IN_PU), PINMUX_DATA(PR0_DATA, PR0_IN, PR0_OUT, PR0_IN_PU), /* PA FN */ PINMUX_DATA(D63_AD31_MARK, PA7_FN), PINMUX_DATA(D62_AD30_MARK, PA6_FN), PINMUX_DATA(D61_AD29_MARK, PA5_FN), PINMUX_DATA(D60_AD28_MARK, PA4_FN), PINMUX_DATA(D59_AD27_MARK, PA3_FN), PINMUX_DATA(D58_AD26_MARK, PA2_FN), PINMUX_DATA(D57_AD25_MARK, PA1_FN), PINMUX_DATA(D56_AD24_MARK, PA0_FN), /* PB FN */ PINMUX_DATA(D55_AD23_MARK, PB7_FN), PINMUX_DATA(D54_AD22_MARK, PB6_FN), PINMUX_DATA(D53_AD21_MARK, PB5_FN), PINMUX_DATA(D52_AD20_MARK, PB4_FN), PINMUX_DATA(D51_AD19_MARK, PB3_FN), PINMUX_DATA(D50_AD18_MARK, PB2_FN), PINMUX_DATA(D49_AD17_DB5_MARK, PB1_FN), PINMUX_DATA(D48_AD16_DB4_MARK, PB0_FN), /* PC FN */ PINMUX_DATA(D47_AD15_DB3_MARK, PC7_FN), PINMUX_DATA(D46_AD14_DB2_MARK, PC6_FN), PINMUX_DATA(D45_AD13_DB1_MARK, PC5_FN), PINMUX_DATA(D44_AD12_DB0_MARK, PC4_FN), PINMUX_DATA(D43_AD11_DG5_MARK, PC3_FN), PINMUX_DATA(D42_AD10_DG4_MARK, PC2_FN), PINMUX_DATA(D41_AD9_DG3_MARK, PC1_FN), PINMUX_DATA(D40_AD8_DG2_MARK, PC0_FN), /* PD FN */ PINMUX_DATA(D39_AD7_DG1_MARK, PD7_FN), PINMUX_DATA(D38_AD6_DG0_MARK, PD6_FN), PINMUX_DATA(D37_AD5_DR5_MARK, PD5_FN), PINMUX_DATA(D36_AD4_DR4_MARK, PD4_FN), PINMUX_DATA(D35_AD3_DR3_MARK, PD3_FN), PINMUX_DATA(D34_AD2_DR2_MARK, PD2_FN), PINMUX_DATA(D33_AD1_DR1_MARK, PD1_FN), PINMUX_DATA(D32_AD0_DR0_MARK, PD0_FN), /* PE FN */ PINMUX_DATA(REQ1_MARK, PE5_FN), PINMUX_DATA(REQ2_MARK, PE4_FN), PINMUX_DATA(REQ3_MARK, P2MSEL0_0, PE3_FN), PINMUX_DATA(GNT1_MARK, PE2_FN), PINMUX_DATA(GNT2_MARK, PE1_FN), PINMUX_DATA(GNT3_MARK, P2MSEL0_0, PE0_FN), PINMUX_DATA(MMCCLK_MARK, P2MSEL0_1, 
PE0_FN), /* PF FN */ PINMUX_DATA(D31_MARK, PF7_FN), PINMUX_DATA(D30_MARK, PF6_FN), PINMUX_DATA(D29_MARK, PF5_FN), PINMUX_DATA(D28_MARK, PF4_FN), PINMUX_DATA(D27_MARK, PF3_FN), PINMUX_DATA(D26_MARK, PF2_FN), PINMUX_DATA(D25_MARK, PF1_FN), PINMUX_DATA(D24_MARK, PF0_FN), /* PF FN */ PINMUX_DATA(D23_MARK, PG7_FN), PINMUX_DATA(D22_MARK, PG6_FN), PINMUX_DATA(D21_MARK, PG5_FN), PINMUX_DATA(D20_MARK, PG4_FN), PINMUX_DATA(D19_MARK, PG3_FN), PINMUX_DATA(D18_MARK, PG2_FN), PINMUX_DATA(D17_MARK, PG1_FN), PINMUX_DATA(D16_MARK, PG0_FN), /* PH FN */ PINMUX_DATA(SCIF1_SCK_MARK, PH7_FN), PINMUX_DATA(SCIF1_RXD_MARK, PH6_FN), PINMUX_DATA(SCIF1_TXD_MARK, PH5_FN), PINMUX_DATA(SCIF0_CTS_MARK, PH4_FN), PINMUX_DATA(INTD_MARK, P1MSEL7_1, PH4_FN), PINMUX_DATA(FCE_MARK, P1MSEL8_1, P1MSEL7_0, PH4_FN), PINMUX_DATA(SCIF0_RTS_MARK, P1MSEL8_0, P1MSEL7_0, PH3_FN), PINMUX_DATA(HSPI_CS_MARK, P1MSEL8_0, P1MSEL7_1, PH3_FN), PINMUX_DATA(FSE_MARK, P1MSEL8_1, P1MSEL7_0, PH3_FN), PINMUX_DATA(SCIF0_SCK_MARK, P1MSEL8_0, P1MSEL7_0, PH2_FN), PINMUX_DATA(HSPI_CLK_MARK, P1MSEL8_0, P1MSEL7_1, PH2_FN), PINMUX_DATA(FRE_MARK, P1MSEL8_1, P1MSEL7_0, PH2_FN), PINMUX_DATA(SCIF0_RXD_MARK, P1MSEL8_0, P1MSEL7_0, PH1_FN), PINMUX_DATA(HSPI_RX_MARK, P1MSEL8_0, P1MSEL7_1, PH1_FN), PINMUX_DATA(FRB_MARK, P1MSEL8_1, P1MSEL7_0, PH1_FN), PINMUX_DATA(SCIF0_TXD_MARK, P1MSEL8_0, P1MSEL7_0, PH0_FN), PINMUX_DATA(HSPI_TX_MARK, P1MSEL8_0, P1MSEL7_1, PH0_FN), PINMUX_DATA(FWE_MARK, P1MSEL8_1, P1MSEL7_0, PH0_FN), /* PJ FN */ PINMUX_DATA(SCIF5_TXD_MARK, P1MSEL2_0, P1MSEL1_0, PJ7_FN), PINMUX_DATA(HAC1_SYNC_MARK, P1MSEL2_0, P1MSEL1_1, PJ7_FN), PINMUX_DATA(SSI1_WS_MARK, P1MSEL2_1, P1MSEL1_0, PJ7_FN), PINMUX_DATA(SIOF_TXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ6_FN), PINMUX_DATA(HAC0_SDOUT_MARK, P1MSEL4_0, P1MSEL3_1, PJ6_FN), PINMUX_DATA(SSI0_SDATA_MARK, P1MSEL4_1, P1MSEL3_0, PJ6_FN), PINMUX_DATA(SIOF_RXD_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ5_FN), PINMUX_DATA(HAC0_SDIN_MARK, P1MSEL4_0, P1MSEL3_1, PJ5_FN), PINMUX_DATA(SSI0_SCK_MARK, 
P1MSEL4_1, P1MSEL3_0, PJ5_FN), PINMUX_DATA(SIOF_SYNC_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ4_FN), PINMUX_DATA(HAC0_SYNC_MARK, P1MSEL4_0, P1MSEL3_1, PJ4_FN), PINMUX_DATA(SSI0_WS_MARK, P1MSEL4_1, P1MSEL3_0, PJ4_FN), PINMUX_DATA(SIOF_MCLK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ3_FN), PINMUX_DATA(HAC_RES_MARK, P1MSEL4_0, P1MSEL3_1, PJ3_FN), PINMUX_DATA(SIOF_SCK_PJ_MARK, P2MSEL1_0, P1MSEL4_0, P1MSEL3_0, PJ2_FN), PINMUX_DATA(HAC0_BITCLK_MARK, P1MSEL4_0, P1MSEL3_1, PJ2_FN), PINMUX_DATA(SSI0_CLK_MARK, P1MSEL4_1, P1MSEL3_0, PJ2_FN), PINMUX_DATA(HAC1_BITCLK_MARK, P1MSEL2_0, PJ1_FN), PINMUX_DATA(SSI1_CLK_MARK, P1MSEL2_1, P1MSEL1_0, PJ1_FN), PINMUX_DATA(TCLK_MARK, P1MSEL9_0, PJ0_FN), PINMUX_DATA(IOIS16_MARK, P1MSEL9_1, PJ0_FN), /* PK FN */ PINMUX_DATA(STATUS0_MARK, P1MSEL15_0, PK7_FN), PINMUX_DATA(DRAK0_PK3_MARK, P1MSEL15_1, PK7_FN), PINMUX_DATA(STATUS1_MARK, P1MSEL15_0, PK6_FN), PINMUX_DATA(DRAK1_PK2_MARK, P1MSEL15_1, PK6_FN), PINMUX_DATA(DACK2_MARK, P1MSEL12_0, P1MSEL11_0, PK5_FN), PINMUX_DATA(SCIF2_TXD_MARK, P1MSEL12_1, P1MSEL11_0, PK5_FN), PINMUX_DATA(MMCCMD_MARK, P1MSEL12_1, P1MSEL11_1, PK5_FN), PINMUX_DATA(SIOF_TXD_PK_MARK, P2MSEL1_1, P1MSEL12_0, P1MSEL11_1, PK5_FN), PINMUX_DATA(DACK3_MARK, P1MSEL12_0, P1MSEL11_0, PK4_FN), PINMUX_DATA(SCIF2_SCK_MARK, P1MSEL12_1, P1MSEL11_0, PK4_FN), PINMUX_DATA(MMCDAT_MARK, P1MSEL12_1, P1MSEL11_1, PK4_FN), PINMUX_DATA(SIOF_SCK_PK_MARK, P2MSEL1_1, P1MSEL12_0, P1MSEL11_1, PK4_FN), PINMUX_DATA(DREQ0_MARK, PK3_FN), PINMUX_DATA(DREQ1_MARK, PK2_FN), PINMUX_DATA(DRAK0_PK1_MARK, PK1_FN), PINMUX_DATA(DRAK1_PK0_MARK, PK0_FN), /* PL FN */ PINMUX_DATA(DREQ2_MARK, P1MSEL13_0, PL7_FN), PINMUX_DATA(INTB_MARK, P1MSEL13_1, PL7_FN), PINMUX_DATA(DREQ3_MARK, P1MSEL13_0, PL6_FN), PINMUX_DATA(INTC_MARK, P1MSEL13_1, PL6_FN), PINMUX_DATA(DRAK2_MARK, P1MSEL10_0, PL5_FN), PINMUX_DATA(CE2A_MARK, P1MSEL10_1, PL5_FN), PINMUX_DATA(IRL4_MARK, P1MSEL14_0, PL4_FN), PINMUX_DATA(FD4_MARK, P1MSEL14_1, PL4_FN), PINMUX_DATA(IRL5_MARK, P1MSEL14_0, PL3_FN), 
PINMUX_DATA(FD5_MARK, P1MSEL14_1, PL3_FN), PINMUX_DATA(IRL6_MARK, P1MSEL14_0, PL2_FN), PINMUX_DATA(FD6_MARK, P1MSEL14_1, PL2_FN), PINMUX_DATA(IRL7_MARK, P1MSEL14_0, PL1_FN), PINMUX_DATA(FD7_MARK, P1MSEL14_1, PL1_FN), PINMUX_DATA(DRAK3_MARK, P1MSEL10_0, PL0_FN), PINMUX_DATA(CE2B_MARK, P1MSEL10_1, PL0_FN), /* PM FN */ PINMUX_DATA(BREQ_BSACK_MARK, PM1_FN), PINMUX_DATA(BACK_BSREQ_MARK, PM0_FN), /* PN FN */ PINMUX_DATA(SCIF5_RXD_MARK, P1MSEL2_0, P1MSEL1_0, PN7_FN), PINMUX_DATA(HAC1_SDIN_MARK, P1MSEL2_0, P1MSEL1_1, PN7_FN), PINMUX_DATA(SSI1_SCK_MARK, P1MSEL2_1, P1MSEL1_0, PN7_FN), PINMUX_DATA(SCIF5_SCK_MARK, P1MSEL2_0, P1MSEL1_0, PN6_FN), PINMUX_DATA(HAC1_SDOUT_MARK, P1MSEL2_0, P1MSEL1_1, PN6_FN), PINMUX_DATA(SSI1_SDATA_MARK, P1MSEL2_1, P1MSEL1_0, PN6_FN), PINMUX_DATA(SCIF3_TXD_MARK, P1MSEL0_0, PN5_FN), PINMUX_DATA(FCLE_MARK, P1MSEL0_1, PN5_FN), PINMUX_DATA(SCIF3_RXD_MARK, P1MSEL0_0, PN4_FN), PINMUX_DATA(FALE_MARK, P1MSEL0_1, PN4_FN), PINMUX_DATA(SCIF3_SCK_MARK, P1MSEL0_0, PN3_FN), PINMUX_DATA(FD0_MARK, P1MSEL0_1, PN3_FN), PINMUX_DATA(SCIF4_TXD_MARK, P1MSEL0_0, PN2_FN), PINMUX_DATA(FD1_MARK, P1MSEL0_1, PN2_FN), PINMUX_DATA(SCIF4_RXD_MARK, P1MSEL0_0, PN1_FN), PINMUX_DATA(FD2_MARK, P1MSEL0_1, PN1_FN), PINMUX_DATA(SCIF4_SCK_MARK, P1MSEL0_0, PN0_FN), PINMUX_DATA(FD3_MARK, P1MSEL0_1, PN0_FN), /* PP FN */ PINMUX_DATA(DEVSEL_DCLKOUT_MARK, PP5_FN), PINMUX_DATA(STOP_CDE_MARK, PP4_FN), PINMUX_DATA(LOCK_ODDF_MARK, PP3_FN), PINMUX_DATA(TRDY_DISPL_MARK, PP2_FN), PINMUX_DATA(IRDY_HSYNC_MARK, PP1_FN), PINMUX_DATA(PCIFRAME_VSYNC_MARK, PP0_FN), /* PQ FN */ PINMUX_DATA(INTA_MARK, PQ4_FN), PINMUX_DATA(GNT0_GNTIN_MARK, PQ3_FN), PINMUX_DATA(REQ0_REQOUT_MARK, PQ2_FN), PINMUX_DATA(PERR_MARK, PQ1_FN), PINMUX_DATA(SERR_MARK, PQ0_FN), /* PR FN */ PINMUX_DATA(WE7_CBE3_MARK, PR3_FN), PINMUX_DATA(WE6_CBE2_MARK, PR2_FN), PINMUX_DATA(WE5_CBE1_MARK, PR1_FN), PINMUX_DATA(WE4_CBE0_MARK, PR0_FN), /* MISC FN */ PINMUX_DATA(SCIF2_RXD_MARK, P1MSEL6_0, P1MSEL5_0), PINMUX_DATA(SIOF_RXD_MARK, P2MSEL1_1, 
P1MSEL6_1, P1MSEL5_0), PINMUX_DATA(MRESETOUT_MARK, P2MSEL2_0), PINMUX_DATA(IRQOUT_MARK, P2MSEL2_1), }; static struct pinmux_gpio pinmux_gpios[] = { /* PA */ PINMUX_GPIO(GPIO_PA7, PA7_DATA), PINMUX_GPIO(GPIO_PA6, PA6_DATA), PINMUX_GPIO(GPIO_PA5, PA5_DATA), PINMUX_GPIO(GPIO_PA4, PA4_DATA), PINMUX_GPIO(GPIO_PA3, PA3_DATA), PINMUX_GPIO(GPIO_PA2, PA2_DATA), PINMUX_GPIO(GPIO_PA1, PA1_DATA), PINMUX_GPIO(GPIO_PA0, PA0_DATA), /* PB */ PINMUX_GPIO(GPIO_PB7, PB7_DATA), PINMUX_GPIO(GPIO_PB6, PB6_DATA), PINMUX_GPIO(GPIO_PB5, PB5_DATA), PINMUX_GPIO(GPIO_PB4, PB4_DATA), PINMUX_GPIO(GPIO_PB3, PB3_DATA), PINMUX_GPIO(GPIO_PB2, PB2_DATA), PINMUX_GPIO(GPIO_PB1, PB1_DATA), PINMUX_GPIO(GPIO_PB0, PB0_DATA), /* PC */ PINMUX_GPIO(GPIO_PC7, PC7_DATA), PINMUX_GPIO(GPIO_PC6, PC6_DATA), PINMUX_GPIO(GPIO_PC5, PC5_DATA), PINMUX_GPIO(GPIO_PC4, PC4_DATA), PINMUX_GPIO(GPIO_PC3, PC3_DATA), PINMUX_GPIO(GPIO_PC2, PC2_DATA), PINMUX_GPIO(GPIO_PC1, PC1_DATA), PINMUX_GPIO(GPIO_PC0, PC0_DATA), /* PD */ PINMUX_GPIO(GPIO_PD7, PD7_DATA), PINMUX_GPIO(GPIO_PD6, PD6_DATA), PINMUX_GPIO(GPIO_PD5, PD5_DATA), PINMUX_GPIO(GPIO_PD4, PD4_DATA), PINMUX_GPIO(GPIO_PD3, PD3_DATA), PINMUX_GPIO(GPIO_PD2, PD2_DATA), PINMUX_GPIO(GPIO_PD1, PD1_DATA), PINMUX_GPIO(GPIO_PD0, PD0_DATA), /* PE */ PINMUX_GPIO(GPIO_PE5, PE5_DATA), PINMUX_GPIO(GPIO_PE4, PE4_DATA), PINMUX_GPIO(GPIO_PE3, PE3_DATA), PINMUX_GPIO(GPIO_PE2, PE2_DATA), PINMUX_GPIO(GPIO_PE1, PE1_DATA), PINMUX_GPIO(GPIO_PE0, PE0_DATA), /* PF */ PINMUX_GPIO(GPIO_PF7, PF7_DATA), PINMUX_GPIO(GPIO_PF6, PF6_DATA), PINMUX_GPIO(GPIO_PF5, PF5_DATA), PINMUX_GPIO(GPIO_PF4, PF4_DATA), PINMUX_GPIO(GPIO_PF3, PF3_DATA), PINMUX_GPIO(GPIO_PF2, PF2_DATA), PINMUX_GPIO(GPIO_PF1, PF1_DATA), PINMUX_GPIO(GPIO_PF0, PF0_DATA), /* PG */ PINMUX_GPIO(GPIO_PG7, PG7_DATA), PINMUX_GPIO(GPIO_PG6, PG6_DATA), PINMUX_GPIO(GPIO_PG5, PG5_DATA), PINMUX_GPIO(GPIO_PG4, PG4_DATA), PINMUX_GPIO(GPIO_PG3, PG3_DATA), PINMUX_GPIO(GPIO_PG2, PG2_DATA), PINMUX_GPIO(GPIO_PG1, PG1_DATA), PINMUX_GPIO(GPIO_PG0, PG0_DATA), /* PH 
*/ PINMUX_GPIO(GPIO_PH7, PH7_DATA), PINMUX_GPIO(GPIO_PH6, PH6_DATA), PINMUX_GPIO(GPIO_PH5, PH5_DATA), PINMUX_GPIO(GPIO_PH4, PH4_DATA), PINMUX_GPIO(GPIO_PH3, PH3_DATA), PINMUX_GPIO(GPIO_PH2, PH2_DATA), PINMUX_GPIO(GPIO_PH1, PH1_DATA), PINMUX_GPIO(GPIO_PH0, PH0_DATA), /* PJ */ PINMUX_GPIO(GPIO_PJ7, PJ7_DATA), PINMUX_GPIO(GPIO_PJ6, PJ6_DATA), PINMUX_GPIO(GPIO_PJ5, PJ5_DATA), PINMUX_GPIO(GPIO_PJ4, PJ4_DATA), PINMUX_GPIO(GPIO_PJ3, PJ3_DATA), PINMUX_GPIO(GPIO_PJ2, PJ2_DATA), PINMUX_GPIO(GPIO_PJ1, PJ1_DATA), PINMUX_GPIO(GPIO_PJ0, PJ0_DATA), /* PK */ PINMUX_GPIO(GPIO_PK7, PK7_DATA), PINMUX_GPIO(GPIO_PK6, PK6_DATA), PINMUX_GPIO(GPIO_PK5, PK5_DATA), PINMUX_GPIO(GPIO_PK4, PK4_DATA), PINMUX_GPIO(GPIO_PK3, PK3_DATA), PINMUX_GPIO(GPIO_PK2, PK2_DATA), PINMUX_GPIO(GPIO_PK1, PK1_DATA), PINMUX_GPIO(GPIO_PK0, PK0_DATA), /* PL */ PINMUX_GPIO(GPIO_PL7, PL7_DATA), PINMUX_GPIO(GPIO_PL6, PL6_DATA), PINMUX_GPIO(GPIO_PL5, PL5_DATA), PINMUX_GPIO(GPIO_PL4, PL4_DATA), PINMUX_GPIO(GPIO_PL3, PL3_DATA), PINMUX_GPIO(GPIO_PL2, PL2_DATA), PINMUX_GPIO(GPIO_PL1, PL1_DATA), PINMUX_GPIO(GPIO_PL0, PL0_DATA), /* PM */ PINMUX_GPIO(GPIO_PM1, PM1_DATA), PINMUX_GPIO(GPIO_PM0, PM0_DATA), /* PN */ PINMUX_GPIO(GPIO_PN7, PN7_DATA), PINMUX_GPIO(GPIO_PN6, PN6_DATA), PINMUX_GPIO(GPIO_PN5, PN5_DATA), PINMUX_GPIO(GPIO_PN4, PN4_DATA), PINMUX_GPIO(GPIO_PN3, PN3_DATA), PINMUX_GPIO(GPIO_PN2, PN2_DATA), PINMUX_GPIO(GPIO_PN1, PN1_DATA), PINMUX_GPIO(GPIO_PN0, PN0_DATA), /* PP */ PINMUX_GPIO(GPIO_PP5, PP5_DATA), PINMUX_GPIO(GPIO_PP4, PP4_DATA), PINMUX_GPIO(GPIO_PP3, PP3_DATA), PINMUX_GPIO(GPIO_PP2, PP2_DATA), PINMUX_GPIO(GPIO_PP1, PP1_DATA), PINMUX_GPIO(GPIO_PP0, PP0_DATA), /* PQ */ PINMUX_GPIO(GPIO_PQ4, PQ4_DATA), PINMUX_GPIO(GPIO_PQ3, PQ3_DATA), PINMUX_GPIO(GPIO_PQ2, PQ2_DATA), PINMUX_GPIO(GPIO_PQ1, PQ1_DATA), PINMUX_GPIO(GPIO_PQ0, PQ0_DATA), /* PR */ PINMUX_GPIO(GPIO_PR3, PR3_DATA), PINMUX_GPIO(GPIO_PR2, PR2_DATA), PINMUX_GPIO(GPIO_PR1, PR1_DATA), PINMUX_GPIO(GPIO_PR0, PR0_DATA), /* FN */ PINMUX_GPIO(GPIO_FN_D63_AD31, 
D63_AD31_MARK), PINMUX_GPIO(GPIO_FN_D62_AD30, D62_AD30_MARK), PINMUX_GPIO(GPIO_FN_D61_AD29, D61_AD29_MARK), PINMUX_GPIO(GPIO_FN_D60_AD28, D60_AD28_MARK), PINMUX_GPIO(GPIO_FN_D59_AD27, D59_AD27_MARK), PINMUX_GPIO(GPIO_FN_D58_AD26, D58_AD26_MARK), PINMUX_GPIO(GPIO_FN_D57_AD25, D57_AD25_MARK), PINMUX_GPIO(GPIO_FN_D56_AD24, D56_AD24_MARK), PINMUX_GPIO(GPIO_FN_D55_AD23, D55_AD23_MARK), PINMUX_GPIO(GPIO_FN_D54_AD22, D54_AD22_MARK), PINMUX_GPIO(GPIO_FN_D53_AD21, D53_AD21_MARK), PINMUX_GPIO(GPIO_FN_D52_AD20, D52_AD20_MARK), PINMUX_GPIO(GPIO_FN_D51_AD19, D51_AD19_MARK), PINMUX_GPIO(GPIO_FN_D50_AD18, D50_AD18_MARK), PINMUX_GPIO(GPIO_FN_D49_AD17_DB5, D49_AD17_DB5_MARK), PINMUX_GPIO(GPIO_FN_D48_AD16_DB4, D48_AD16_DB4_MARK), PINMUX_GPIO(GPIO_FN_D47_AD15_DB3, D47_AD15_DB3_MARK), PINMUX_GPIO(GPIO_FN_D46_AD14_DB2, D46_AD14_DB2_MARK), PINMUX_GPIO(GPIO_FN_D45_AD13_DB1, D45_AD13_DB1_MARK), PINMUX_GPIO(GPIO_FN_D44_AD12_DB0, D44_AD12_DB0_MARK), PINMUX_GPIO(GPIO_FN_D43_AD11_DG5, D43_AD11_DG5_MARK), PINMUX_GPIO(GPIO_FN_D42_AD10_DG4, D42_AD10_DG4_MARK), PINMUX_GPIO(GPIO_FN_D41_AD9_DG3, D41_AD9_DG3_MARK), PINMUX_GPIO(GPIO_FN_D40_AD8_DG2, D40_AD8_DG2_MARK), PINMUX_GPIO(GPIO_FN_D39_AD7_DG1, D39_AD7_DG1_MARK), PINMUX_GPIO(GPIO_FN_D38_AD6_DG0, D38_AD6_DG0_MARK), PINMUX_GPIO(GPIO_FN_D37_AD5_DR5, D37_AD5_DR5_MARK), PINMUX_GPIO(GPIO_FN_D36_AD4_DR4, D36_AD4_DR4_MARK), PINMUX_GPIO(GPIO_FN_D35_AD3_DR3, D35_AD3_DR3_MARK), PINMUX_GPIO(GPIO_FN_D34_AD2_DR2, D34_AD2_DR2_MARK), PINMUX_GPIO(GPIO_FN_D33_AD1_DR1, D33_AD1_DR1_MARK), PINMUX_GPIO(GPIO_FN_D32_AD0_DR0, D32_AD0_DR0_MARK), PINMUX_GPIO(GPIO_FN_REQ1, REQ1_MARK), PINMUX_GPIO(GPIO_FN_REQ2, REQ2_MARK), PINMUX_GPIO(GPIO_FN_REQ3, REQ3_MARK), PINMUX_GPIO(GPIO_FN_GNT1, GNT1_MARK), PINMUX_GPIO(GPIO_FN_GNT2, GNT2_MARK), PINMUX_GPIO(GPIO_FN_GNT3, GNT3_MARK), PINMUX_GPIO(GPIO_FN_MMCCLK, MMCCLK_MARK), PINMUX_GPIO(GPIO_FN_D31, D31_MARK), PINMUX_GPIO(GPIO_FN_D30, D30_MARK), PINMUX_GPIO(GPIO_FN_D29, D29_MARK), PINMUX_GPIO(GPIO_FN_D28, D28_MARK), 
PINMUX_GPIO(GPIO_FN_D27, D27_MARK), PINMUX_GPIO(GPIO_FN_D26, D26_MARK), PINMUX_GPIO(GPIO_FN_D25, D25_MARK), PINMUX_GPIO(GPIO_FN_D24, D24_MARK), PINMUX_GPIO(GPIO_FN_D23, D23_MARK), PINMUX_GPIO(GPIO_FN_D22, D22_MARK), PINMUX_GPIO(GPIO_FN_D21, D21_MARK), PINMUX_GPIO(GPIO_FN_D20, D20_MARK), PINMUX_GPIO(GPIO_FN_D19, D19_MARK), PINMUX_GPIO(GPIO_FN_D18, D18_MARK), PINMUX_GPIO(GPIO_FN_D17, D17_MARK), PINMUX_GPIO(GPIO_FN_D16, D16_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_SCK, SCIF1_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_RXD, SCIF1_RXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF1_TXD, SCIF1_TXD_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_CTS, SCIF0_CTS_MARK), PINMUX_GPIO(GPIO_FN_INTD, INTD_MARK), PINMUX_GPIO(GPIO_FN_FCE, FCE_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RTS, SCIF0_RTS_MARK), PINMUX_GPIO(GPIO_FN_HSPI_CS, HSPI_CS_MARK), PINMUX_GPIO(GPIO_FN_FSE, FSE_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_SCK, SCIF0_SCK_MARK), PINMUX_GPIO(GPIO_FN_HSPI_CLK, HSPI_CLK_MARK), PINMUX_GPIO(GPIO_FN_FRE, FRE_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_RXD, SCIF0_RXD_MARK), PINMUX_GPIO(GPIO_FN_HSPI_RX, HSPI_RX_MARK), PINMUX_GPIO(GPIO_FN_FRB, FRB_MARK), PINMUX_GPIO(GPIO_FN_SCIF0_TXD, SCIF0_TXD_MARK), PINMUX_GPIO(GPIO_FN_HSPI_TX, HSPI_TX_MARK), PINMUX_GPIO(GPIO_FN_FWE, FWE_MARK), PINMUX_GPIO(GPIO_FN_SCIF5_TXD, SCIF5_TXD_MARK), PINMUX_GPIO(GPIO_FN_HAC1_SYNC, HAC1_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SSI1_WS, SSI1_WS_MARK), PINMUX_GPIO(GPIO_FN_SIOF_TXD_PJ, SIOF_TXD_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC0_SDOUT, HAC0_SDOUT_MARK), PINMUX_GPIO(GPIO_FN_SSI0_SDATA, SSI0_SDATA_MARK), PINMUX_GPIO(GPIO_FN_SIOF_RXD_PJ, SIOF_RXD_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC0_SDIN, HAC0_SDIN_MARK), PINMUX_GPIO(GPIO_FN_SSI0_SCK, SSI0_SCK_MARK), PINMUX_GPIO(GPIO_FN_SIOF_SYNC_PJ, SIOF_SYNC_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC0_SYNC, HAC0_SYNC_MARK), PINMUX_GPIO(GPIO_FN_SSI0_WS, SSI0_WS_MARK), PINMUX_GPIO(GPIO_FN_SIOF_MCLK_PJ, SIOF_MCLK_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC_RES, HAC_RES_MARK), PINMUX_GPIO(GPIO_FN_SIOF_SCK_PJ, SIOF_SCK_PJ_MARK), PINMUX_GPIO(GPIO_FN_HAC0_BITCLK, HAC0_BITCLK_MARK), 
PINMUX_GPIO(GPIO_FN_SSI0_CLK, SSI0_CLK_MARK), PINMUX_GPIO(GPIO_FN_HAC1_BITCLK, HAC1_BITCLK_MARK), PINMUX_GPIO(GPIO_FN_SSI1_CLK, SSI1_CLK_MARK), PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK), PINMUX_GPIO(GPIO_FN_IOIS16, IOIS16_MARK), PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), PINMUX_GPIO(GPIO_FN_DRAK0_PK3, DRAK0_PK3_MARK), PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK), PINMUX_GPIO(GPIO_FN_DRAK1_PK2, DRAK1_PK2_MARK), PINMUX_GPIO(GPIO_FN_DACK2, DACK2_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_TXD, SCIF2_TXD_MARK), PINMUX_GPIO(GPIO_FN_MMCCMD, MMCCMD_MARK), PINMUX_GPIO(GPIO_FN_SIOF_TXD_PK, SIOF_TXD_PK_MARK), PINMUX_GPIO(GPIO_FN_DACK3, DACK3_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_SCK, SCIF2_SCK_MARK), PINMUX_GPIO(GPIO_FN_MMCDAT, MMCDAT_MARK), PINMUX_GPIO(GPIO_FN_SIOF_SCK_PK, SIOF_SCK_PK_MARK), PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK), PINMUX_GPIO(GPIO_FN_DRAK0_PK1, DRAK0_PK1_MARK), PINMUX_GPIO(GPIO_FN_DRAK1_PK0, DRAK1_PK0_MARK), PINMUX_GPIO(GPIO_FN_DREQ2, DREQ2_MARK), PINMUX_GPIO(GPIO_FN_INTB, INTB_MARK), PINMUX_GPIO(GPIO_FN_DREQ3, DREQ3_MARK), PINMUX_GPIO(GPIO_FN_INTC, INTC_MARK), PINMUX_GPIO(GPIO_FN_DRAK2, DRAK2_MARK), PINMUX_GPIO(GPIO_FN_CE2A, CE2A_MARK), PINMUX_GPIO(GPIO_FN_IRL4, IRL4_MARK), PINMUX_GPIO(GPIO_FN_FD4, FD4_MARK), PINMUX_GPIO(GPIO_FN_IRL5, IRL5_MARK), PINMUX_GPIO(GPIO_FN_FD5, FD5_MARK), PINMUX_GPIO(GPIO_FN_IRL6, IRL6_MARK), PINMUX_GPIO(GPIO_FN_FD6, FD6_MARK), PINMUX_GPIO(GPIO_FN_IRL7, IRL7_MARK), PINMUX_GPIO(GPIO_FN_FD7, FD7_MARK), PINMUX_GPIO(GPIO_FN_DRAK3, DRAK3_MARK), PINMUX_GPIO(GPIO_FN_CE2B, CE2B_MARK), PINMUX_GPIO(GPIO_FN_BREQ_BSACK, BREQ_BSACK_MARK), PINMUX_GPIO(GPIO_FN_BACK_BSREQ, BACK_BSREQ_MARK), PINMUX_GPIO(GPIO_FN_SCIF5_RXD, SCIF5_RXD_MARK), PINMUX_GPIO(GPIO_FN_HAC1_SDIN, HAC1_SDIN_MARK), PINMUX_GPIO(GPIO_FN_SSI1_SCK, SSI1_SCK_MARK), PINMUX_GPIO(GPIO_FN_SCIF5_SCK, SCIF5_SCK_MARK), PINMUX_GPIO(GPIO_FN_HAC1_SDOUT, HAC1_SDOUT_MARK), PINMUX_GPIO(GPIO_FN_SSI1_SDATA, SSI1_SDATA_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_TXD, SCIF3_TXD_MARK), 
PINMUX_GPIO(GPIO_FN_FCLE, FCLE_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_RXD, SCIF3_RXD_MARK), PINMUX_GPIO(GPIO_FN_FALE, FALE_MARK), PINMUX_GPIO(GPIO_FN_SCIF3_SCK, SCIF3_SCK_MARK), PINMUX_GPIO(GPIO_FN_FD0, FD0_MARK), PINMUX_GPIO(GPIO_FN_SCIF4_TXD, SCIF4_TXD_MARK), PINMUX_GPIO(GPIO_FN_FD1, FD1_MARK), PINMUX_GPIO(GPIO_FN_SCIF4_RXD, SCIF4_RXD_MARK), PINMUX_GPIO(GPIO_FN_FD2, FD2_MARK), PINMUX_GPIO(GPIO_FN_SCIF4_SCK, SCIF4_SCK_MARK), PINMUX_GPIO(GPIO_FN_FD3, FD3_MARK), PINMUX_GPIO(GPIO_FN_DEVSEL_DCLKOUT, DEVSEL_DCLKOUT_MARK), PINMUX_GPIO(GPIO_FN_STOP_CDE, STOP_CDE_MARK), PINMUX_GPIO(GPIO_FN_LOCK_ODDF, LOCK_ODDF_MARK), PINMUX_GPIO(GPIO_FN_TRDY_DISPL, TRDY_DISPL_MARK), PINMUX_GPIO(GPIO_FN_IRDY_HSYNC, IRDY_HSYNC_MARK), PINMUX_GPIO(GPIO_FN_PCIFRAME_VSYNC, PCIFRAME_VSYNC_MARK), PINMUX_GPIO(GPIO_FN_INTA, INTA_MARK), PINMUX_GPIO(GPIO_FN_GNT0_GNTIN, GNT0_GNTIN_MARK), PINMUX_GPIO(GPIO_FN_REQ0_REQOUT, REQ0_REQOUT_MARK), PINMUX_GPIO(GPIO_FN_PERR, PERR_MARK), PINMUX_GPIO(GPIO_FN_SERR, SERR_MARK), PINMUX_GPIO(GPIO_FN_WE7_CBE3, WE7_CBE3_MARK), PINMUX_GPIO(GPIO_FN_WE6_CBE2, WE6_CBE2_MARK), PINMUX_GPIO(GPIO_FN_WE5_CBE1, WE5_CBE1_MARK), PINMUX_GPIO(GPIO_FN_WE4_CBE0, WE4_CBE0_MARK), PINMUX_GPIO(GPIO_FN_SCIF2_RXD, SCIF2_RXD_MARK), PINMUX_GPIO(GPIO_FN_SIOF_RXD, SIOF_RXD_MARK), PINMUX_GPIO(GPIO_FN_MRESETOUT, MRESETOUT_MARK), PINMUX_GPIO(GPIO_FN_IRQOUT, IRQOUT_MARK), }; static struct pinmux_cfg_reg pinmux_config_regs[] = { { PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) { PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU, PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU, PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU, PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU, PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU, PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU, PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU, PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU } }, { PINMUX_CFG_REG("PBCR", 0xffe70002, 16, 2) { PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU, PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU, PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU, PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU, PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU, PB2_FN, PB2_OUT, PB2_IN, 
PB2_IN_PU, PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU, PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU } }, { PINMUX_CFG_REG("PCCR", 0xffe70004, 16, 2) { PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU, PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU, PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU, PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU, PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU, PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU, PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU, PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU } }, { PINMUX_CFG_REG("PDCR", 0xffe70006, 16, 2) { PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU, PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU, PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU, PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU, PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU, PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU, PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU, PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU } }, { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU, PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU, PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU, PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU, PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU, PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU } }, { PINMUX_CFG_REG("PFCR", 0xffe7000a, 16, 2) { PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU, PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU, PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU, PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU, PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU, PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU, PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU, PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU } }, { PINMUX_CFG_REG("PGCR", 0xffe7000c, 16, 2) { PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU, PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU, PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU, PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU, PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU, PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU, PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU, PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU } }, { PINMUX_CFG_REG("PHCR", 0xffe7000e, 16, 2) { PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU, PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU, PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU, PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU, PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU, PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU, PH1_FN, 
PH1_OUT, PH1_IN, PH1_IN_PU, PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU } }, { PINMUX_CFG_REG("PJCR", 0xffe70010, 16, 2) { PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU, PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU, PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU, PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU, PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU, PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU, PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU, PJ0_FN, PJ0_OUT, PJ0_IN, PJ0_IN_PU } }, { PINMUX_CFG_REG("PKCR", 0xffe70012, 16, 2) { PK7_FN, PK7_OUT, PK7_IN, PK7_IN_PU, PK6_FN, PK6_OUT, PK6_IN, PK6_IN_PU, PK5_FN, PK5_OUT, PK5_IN, PK5_IN_PU, PK4_FN, PK4_OUT, PK4_IN, PK4_IN_PU, PK3_FN, PK3_OUT, PK3_IN, PK3_IN_PU, PK2_FN, PK2_OUT, PK2_IN, PK2_IN_PU, PK1_FN, PK1_OUT, PK1_IN, PK1_IN_PU, PK0_FN, PK0_OUT, PK0_IN, PK0_IN_PU } }, { PINMUX_CFG_REG("PLCR", 0xffe70014, 16, 2) { PL7_FN, PL7_OUT, PL7_IN, PL7_IN_PU, PL6_FN, PL6_OUT, PL6_IN, PL6_IN_PU, PL5_FN, PL5_OUT, PL5_IN, PL5_IN_PU, PL4_FN, PL4_OUT, PL4_IN, PL4_IN_PU, PL3_FN, PL3_OUT, PL3_IN, PL3_IN_PU, PL2_FN, PL2_OUT, PL2_IN, PL2_IN_PU, PL1_FN, PL1_OUT, PL1_IN, PL1_IN_PU, PL0_FN, PL0_OUT, PL0_IN, PL0_IN_PU } }, { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PM1_FN, PM1_OUT, PM1_IN, PM1_IN_PU, PM0_FN, PM0_OUT, PM0_IN, PM0_IN_PU } }, { PINMUX_CFG_REG("PNCR", 0xffe70018, 16, 2) { PN7_FN, PN7_OUT, PN7_IN, PN7_IN_PU, PN6_FN, PN6_OUT, PN6_IN, PN6_IN_PU, PN5_FN, PN5_OUT, PN5_IN, PN5_IN_PU, PN4_FN, PN4_OUT, PN4_IN, PN4_IN_PU, PN3_FN, PN3_OUT, PN3_IN, PN3_IN_PU, PN2_FN, PN2_OUT, PN2_IN, PN2_IN_PU, PN1_FN, PN1_OUT, PN1_IN, PN1_IN_PU, PN0_FN, PN0_OUT, PN0_IN, PN0_IN_PU } }, { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, PP5_FN, PP5_OUT, PP5_IN, PP5_IN_PU, PP4_FN, PP4_OUT, PP4_IN, PP4_IN_PU, PP3_FN, PP3_OUT, PP3_IN, PP3_IN_PU, PP2_FN, PP2_OUT, PP2_IN, PP2_IN_PU, PP1_FN, PP1_OUT, PP1_IN, PP1_IN_PU, PP0_FN, PP0_OUT, PP0_IN, PP0_IN_PU } }, { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PQ4_FN, 
PQ4_OUT, PQ4_IN, PQ4_IN_PU, PQ3_FN, PQ3_OUT, PQ3_IN, PQ3_IN_PU, PQ2_FN, PQ2_OUT, PQ2_IN, PQ2_IN_PU, PQ1_FN, PQ1_OUT, PQ1_IN, PQ1_IN_PU, PQ0_FN, PQ0_OUT, PQ0_IN, PQ0_IN_PU } }, { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PR3_FN, PR3_OUT, PR3_IN, PR3_IN_PU, PR2_FN, PR2_OUT, PR2_IN, PR2_IN_PU, PR1_FN, PR1_OUT, PR1_IN, PR1_IN_PU, PR0_FN, PR0_OUT, PR0_IN, PR0_IN_PU } }, { PINMUX_CFG_REG("P1MSELR", 0xffe70080, 16, 1) { P1MSEL15_0, P1MSEL15_1, P1MSEL14_0, P1MSEL14_1, P1MSEL13_0, P1MSEL13_1, P1MSEL12_0, P1MSEL12_1, P1MSEL11_0, P1MSEL11_1, P1MSEL10_0, P1MSEL10_1, P1MSEL9_0, P1MSEL9_1, P1MSEL8_0, P1MSEL8_1, P1MSEL7_0, P1MSEL7_1, P1MSEL6_0, P1MSEL6_1, P1MSEL5_0, 0, P1MSEL4_0, P1MSEL4_1, P1MSEL3_0, P1MSEL3_1, P1MSEL2_0, P1MSEL2_1, P1MSEL1_0, P1MSEL1_1, P1MSEL0_0, P1MSEL0_1 } }, { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, P2MSEL2_0, P2MSEL2_1, P2MSEL1_0, P2MSEL1_1, P2MSEL0_0, P2MSEL0_1 } }, {} }; static struct pinmux_data_reg pinmux_data_regs[] = { { PINMUX_DATA_REG("PADR", 0xffe70020, 8) { PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA, PA3_DATA, PA2_DATA, PA1_DATA, PA0_DATA } }, { PINMUX_DATA_REG("PBDR", 0xffe70022, 8) { PB7_DATA, PB6_DATA, PB5_DATA, PB4_DATA, PB3_DATA, PB2_DATA, PB1_DATA, PB0_DATA } }, { PINMUX_DATA_REG("PCDR", 0xffe70024, 8) { PC7_DATA, PC6_DATA, PC5_DATA, PC4_DATA, PC3_DATA, PC2_DATA, PC1_DATA, PC0_DATA } }, { PINMUX_DATA_REG("PDDR", 0xffe70026, 8) { PD7_DATA, PD6_DATA, PD5_DATA, PD4_DATA, PD3_DATA, PD2_DATA, PD1_DATA, PD0_DATA } }, { PINMUX_DATA_REG("PEDR", 0xffe70028, 8) { 0, 0, PE5_DATA, PE4_DATA, PE3_DATA, PE2_DATA, PE1_DATA, PE0_DATA } }, { PINMUX_DATA_REG("PFDR", 0xffe7002a, 8) { PF7_DATA, PF6_DATA, PF5_DATA, PF4_DATA, PF3_DATA, PF2_DATA, PF1_DATA, PF0_DATA } }, { PINMUX_DATA_REG("PGDR", 0xffe7002c, 8) { PG7_DATA, PG6_DATA, PG5_DATA, PG4_DATA, PG3_DATA, PG2_DATA, PG1_DATA, PG0_DATA } }, { PINMUX_DATA_REG("PHDR", 0xffe7002e, 8) 
{ PH7_DATA, PH6_DATA, PH5_DATA, PH4_DATA, PH3_DATA, PH2_DATA, PH1_DATA, PH0_DATA } }, { PINMUX_DATA_REG("PJDR", 0xffe70030, 8) { PJ7_DATA, PJ6_DATA, PJ5_DATA, PJ4_DATA, PJ3_DATA, PJ2_DATA, PJ1_DATA, PJ0_DATA } }, { PINMUX_DATA_REG("PKDR", 0xffe70032, 8) { PK7_DATA, PK6_DATA, PK5_DATA, PK4_DATA, PK3_DATA, PK2_DATA, PK1_DATA, PK0_DATA } }, { PINMUX_DATA_REG("PLDR", 0xffe70034, 8) { PL7_DATA, PL6_DATA, PL5_DATA, PL4_DATA, PL3_DATA, PL2_DATA, PL1_DATA, PL0_DATA } }, { PINMUX_DATA_REG("PMDR", 0xffe70036, 8) { 0, 0, 0, 0, 0, 0, PM1_DATA, PM0_DATA } }, { PINMUX_DATA_REG("PNDR", 0xffe70038, 8) { PN7_DATA, PN6_DATA, PN5_DATA, PN4_DATA, PN3_DATA, PN2_DATA, PN1_DATA, PN0_DATA } }, { PINMUX_DATA_REG("PPDR", 0xffe7003a, 8) { 0, 0, PP5_DATA, PP4_DATA, PP3_DATA, PP2_DATA, PP1_DATA, PP0_DATA } }, { PINMUX_DATA_REG("PQDR", 0xffe7003c, 8) { 0, 0, 0, PQ4_DATA, PQ3_DATA, PQ2_DATA, PQ1_DATA, PQ0_DATA } }, { PINMUX_DATA_REG("PRDR", 0xffe7003e, 8) { 0, 0, 0, 0, PR3_DATA, PR2_DATA, PR1_DATA, PR0_DATA } }, { }, }; static struct pinmux_info sh7785_pinmux_info = { .name = "sh7785_pfc", .reserved_id = PINMUX_RESERVED, .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .first_gpio = GPIO_PA7, .last_gpio = GPIO_FN_IRQOUT, .gpios = pinmux_gpios, .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), }; static int __init plat_pinmux_setup(void) { return register_pinmux(&sh7785_pinmux_info); } arch_initcall(plat_pinmux_setup);
gpl-2.0
MozOpenHard/linux-rockchip
sound/pci/oxygen/xonar_hdmi.c
11025
3495
/*
 * helper functions for HDMI models (Xonar HDAV1.3/HDAV1.3 Slim)
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 *
 *
 * This driver is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <sound/asoundef.h>
#include <sound/control.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/tlv.h>
#include "xonar.h"

/*
 * Send one framed command to the HDMI transmitter over the card's UART.
 *
 * Frame layout (as written below): the two header bytes 0xfb 0xef, the
 * command byte, a payload length byte, @count payload bytes, and finally a
 * checksum that is the 8-bit sum of every preceding byte of the frame
 * (header + command + count + payload).
 */
static void hdmi_write_command(struct oxygen *chip, u8 command,
			       unsigned int count, const u8 *params)
{
	unsigned int i;
	u8 checksum;

	oxygen_write_uart(chip, 0xfb);
	oxygen_write_uart(chip, 0xef);
	oxygen_write_uart(chip, command);
	oxygen_write_uart(chip, count);
	for (i = 0; i < count; ++i)
		oxygen_write_uart(chip, params[i]);
	/* checksum covers the header bytes, command, count and payload */
	checksum = 0xfb + 0xef + command + count;
	for (i = 0; i < count; ++i)
		checksum += params[i];
	oxygen_write_uart(chip, checksum);
}

/*
 * (Re)program the HDMI chip: reset the UART link, then send command 0x61
 * with parameter 0, command 0x74 with parameter 1 (presumably an output
 * enable -- 0x74/0 is sent again on cleanup; not confirmed here), and
 * finally command 0x54 carrying the five cached stream parameters.
 */
static void xonar_hdmi_init_commands(struct oxygen *chip,
				     struct xonar_hdmi *hdmi)
{
	u8 param;

	oxygen_reset_uart(chip);
	param = 0;
	hdmi_write_command(chip, 0x61, 1, &param);
	param = 1;
	hdmi_write_command(chip, 0x74, 1, &param);
	hdmi_write_command(chip, 0x54, 5, hdmi->params);
}

/*
 * One-time init: seed the cached parameters with a 48 kHz IEC958 rate code
 * and params[4] = 1 (same fixed value is written in xonar_set_hdmi_params),
 * then push the full command sequence to the chip.
 */
void xonar_hdmi_init(struct oxygen *chip, struct xonar_hdmi *hdmi)
{
	hdmi->params[1] = IEC958_AES3_CON_FS_48000;
	hdmi->params[4] = 1;
	xonar_hdmi_init_commands(chip, hdmi);
}

/* Shutdown: send command 0x74 with parameter 0 (inverse of the init value). */
void xonar_hdmi_cleanup(struct oxygen *chip)
{
	u8 param = 0;

	hdmi_write_command(chip, 0x74, 1, &param);
}

/* Resume from suspend: replay the full init command sequence. */
void xonar_hdmi_resume(struct oxygen *chip, struct xonar_hdmi *hdmi)
{
	xonar_hdmi_init_commands(chip, hdmi);
}

/*
 * Restrict the multichannel PCM substream to the sample rates the HDMI
 * transmitter can be programmed with (see the switch in
 * xonar_set_hdmi_params); other substreams are left untouched.
 */
void xonar_hdmi_pcm_hardware_filter(unsigned int channel,
				    struct snd_pcm_hardware *hardware)
{
	if (channel == PCM_MULTICH) {
		hardware->rates = SNDRV_PCM_RATE_44100 |
				  SNDRV_PCM_RATE_48000 |
				  SNDRV_PCM_RATE_96000 |
				  SNDRV_PCM_RATE_192000;
		hardware->rate_min = 44100;
	}
}

/*
 * Translate the hw_params of a starting stream into the five-byte 0x54
 * command payload and send it:
 *   params[0] - 0 = PCM audio (1 would mean non-audio)
 *   params[1] - IEC958 channel-status sampling-rate code
 *   params[2] - channel pair count minus one
 *   params[3] - 0 for S16_LE, 0xc0 otherwise (bit meaning not documented here)
 *   params[4] - constant 1 (purpose unknown, see original "?" note)
 */
void xonar_set_hdmi_params(struct oxygen *chip, struct xonar_hdmi *hdmi,
			   struct snd_pcm_hw_params *params)
{
	hdmi->params[0] = 0; /* 1 = non-audio */
	switch (params_rate(params)) {
	case 44100:
		hdmi->params[1] = IEC958_AES3_CON_FS_44100;
		break;
	case 48000:
		hdmi->params[1] = IEC958_AES3_CON_FS_48000;
		break;
	default: /* 96000 */
		hdmi->params[1] = IEC958_AES3_CON_FS_96000;
		break;
	case 192000:
		hdmi->params[1] = IEC958_AES3_CON_FS_192000;
		break;
	}
	hdmi->params[2] = params_channels(params) / 2 - 1;
	if (params_format(params) == SNDRV_PCM_FORMAT_S16_LE)
		hdmi->params[3] = 0;
	else
		hdmi->params[3] = 0xc0;
	hdmi->params[4] = 1; /* ? */
	hdmi_write_command(chip, 0x54, 5, hdmi->params);
}

/*
 * UART receive callback: once the accumulated input ends in "OK", dump the
 * complete message at debug level and reset the input buffer so the next
 * message starts fresh.
 */
void xonar_hdmi_uart_input(struct oxygen *chip)
{
	if (chip->uart_input_count >= 2 &&
	    chip->uart_input[chip->uart_input_count - 2] == 'O' &&
	    chip->uart_input[chip->uart_input_count - 1] == 'K') {
		printk(KERN_DEBUG "message from HDMI chip received:\n");
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
				     chip->uart_input, chip->uart_input_count);
		chip->uart_input_count = 0;
	}
}
gpl-2.0
Celelibi/syslinux
com32/lua/src/cpu.c
18
5466
#include <stdlib.h> #include <string.h> #define llua_cpu /* Define the library */ /* Include the Lua API header files */ #include"lua.h" #include"lauxlib.h" #include"lualib.h" #include"cpuid.h" static void add_string_item(lua_State *L, const char *item, const char *value_str) { lua_pushstring(L,item); lua_pushstring(L,value_str); lua_settable(L,-3); } static void add_int_item(lua_State *L, const char *item, int value_int) { lua_pushstring(L,item); lua_pushnumber(L,value_int); lua_settable(L,-3); } static void add_flag(lua_State *L, bool value, const char *value_str) { char buffer[32] = { 0 }; snprintf(buffer,sizeof(buffer), "flags.%s",value_str); lua_pushstring(L,buffer); // printf("%s=%d\n",value_str,value); if (value == true) { lua_pushstring(L,"yes"); } else { lua_pushstring(L,"no"); } lua_settable(L,-3); } static int cpu_getflags(lua_State *L) { s_cpu lua_cpu; detect_cpu(&lua_cpu); lua_newtable(L); add_string_item(L, "vendor", lua_cpu.vendor); add_string_item(L, "model", lua_cpu.model); add_int_item(L, "cores", lua_cpu.num_cores); add_int_item(L, "l1_instruction_cache", lua_cpu.l1_instruction_cache_size); add_int_item(L, "l1_data_cache", lua_cpu.l1_data_cache_size); add_int_item(L, "l2_cache", lua_cpu.l2_cache_size); add_int_item(L, "family_id", lua_cpu.family); add_int_item(L, "model_id", lua_cpu.model_id); add_int_item(L, "stepping", lua_cpu.stepping); add_flag(L, lua_cpu.flags.fpu, "fpu"); add_flag(L, lua_cpu.flags.vme, "vme"); add_flag(L, lua_cpu.flags.de, "de"); add_flag(L, lua_cpu.flags.pse, "pse"); add_flag(L, lua_cpu.flags.tsc, "tsc"); add_flag(L, lua_cpu.flags.msr, "msr"); add_flag(L, lua_cpu.flags.pae, "pae"); add_flag(L, lua_cpu.flags.mce, "mce"); add_flag(L, lua_cpu.flags.cx8, "cx8"); add_flag(L, lua_cpu.flags.apic, "apic"); add_flag(L, lua_cpu.flags.sep, "sep"); add_flag(L, lua_cpu.flags.mtrr, "mtrr"); add_flag(L, lua_cpu.flags.pge, "pge"); add_flag(L, lua_cpu.flags.mca, "mca"); add_flag(L, lua_cpu.flags.cmov, "cmov"); add_flag(L, 
lua_cpu.flags.pat, "pat"); add_flag(L, lua_cpu.flags.pse_36, "pse_36"); add_flag(L, lua_cpu.flags.psn, "psn"); add_flag(L, lua_cpu.flags.clflsh, "clflsh"); add_flag(L, lua_cpu.flags.dts, "dts"); add_flag(L, lua_cpu.flags.acpi, "acpi"); add_flag(L, lua_cpu.flags.mmx, "mmx"); add_flag(L, lua_cpu.flags.sse, "sse"); add_flag(L, lua_cpu.flags.sse2, "sse2"); add_flag(L, lua_cpu.flags.ss, "ss"); add_flag(L, lua_cpu.flags.htt, "ht"); add_flag(L, lua_cpu.flags.acc, "acc"); add_flag(L, lua_cpu.flags.syscall, "syscall"); add_flag(L, lua_cpu.flags.mp, "mp"); add_flag(L, lua_cpu.flags.nx, "nx"); add_flag(L, lua_cpu.flags.mmxext, "mmxext"); add_flag(L, lua_cpu.flags.lm, "lm"); add_flag(L, lua_cpu.flags.nowext, "3dnowext"); add_flag(L, lua_cpu.flags.now, "3dnow!"); add_flag(L, lua_cpu.flags.svm, "svm"); add_flag(L, lua_cpu.flags.vmx, "vmx"); add_flag(L, lua_cpu.flags.pbe, "pbe"); add_flag(L, lua_cpu.flags.fxsr_opt, "fxsr_opt"); add_flag(L, lua_cpu.flags.gbpages, "gbpages"); add_flag(L, lua_cpu.flags.rdtscp, "rdtscp"); add_flag(L, lua_cpu.flags.pni, "pni"); add_flag(L, lua_cpu.flags.pclmulqd, "pclmulqd"); add_flag(L, lua_cpu.flags.dtes64, "dtes64"); add_flag(L, lua_cpu.flags.smx, "smx"); add_flag(L, lua_cpu.flags.est, "est"); add_flag(L, lua_cpu.flags.tm2, "tm2"); add_flag(L, lua_cpu.flags.sse3, "sse3"); add_flag(L, lua_cpu.flags.fma, "fma"); add_flag(L, lua_cpu.flags.cx16, "cx16"); add_flag(L, lua_cpu.flags.xtpr, "xtpr"); add_flag(L, lua_cpu.flags.pdcm, "pdcm"); add_flag(L, lua_cpu.flags.dca, "dca"); add_flag(L, lua_cpu.flags.xmm4_1, "xmm4_1"); add_flag(L, lua_cpu.flags.xmm4_2, "xmm4_2"); add_flag(L, lua_cpu.flags.x2apic, "x2apic"); add_flag(L, lua_cpu.flags.movbe, "movbe"); add_flag(L, lua_cpu.flags.popcnt, "popcnt"); add_flag(L, lua_cpu.flags.aes, "aes"); add_flag(L, lua_cpu.flags.xsave, "xsave"); add_flag(L, lua_cpu.flags.osxsave, "osxsave"); add_flag(L, lua_cpu.flags.avx, "avx"); add_flag(L, lua_cpu.flags.hypervisor, "hypervisor"); add_flag(L, lua_cpu.flags.ace2, "ace2"); 
add_flag(L, lua_cpu.flags.ace2_en, "ace2_en"); add_flag(L, lua_cpu.flags.phe, "phe"); add_flag(L, lua_cpu.flags.phe_en, "phe_en"); add_flag(L, lua_cpu.flags.pmm, "pmm"); add_flag(L, lua_cpu.flags.pmm_en, "pmm_en"); add_flag(L, lua_cpu.flags.extapic, "extapic"); add_flag(L, lua_cpu.flags.cr8_legacy, "cr8_legacy"); add_flag(L, lua_cpu.flags.abm, "abm"); add_flag(L, lua_cpu.flags.sse4a, "sse4a"); add_flag(L, lua_cpu.flags.misalignsse, "misalignsse"); add_flag(L, lua_cpu.flags.nowprefetch, "3dnowprefetch"); add_flag(L, lua_cpu.flags.osvw, "osvw"); add_flag(L, lua_cpu.flags.ibs, "ibs"); add_flag(L, lua_cpu.flags.sse5, "sse5"); add_flag(L, lua_cpu.flags.skinit, "skinit"); add_flag(L, lua_cpu.flags.wdt, "wdt"); add_flag(L, lua_cpu.flags.ida, "ida"); add_flag(L, lua_cpu.flags.arat, "arat"); add_flag(L, lua_cpu.flags.tpr_shadow, "tpr_shadow"); add_flag(L, lua_cpu.flags.vnmi, "vnmi"); add_flag(L, lua_cpu.flags.flexpriority, "flexpriority"); add_flag(L, lua_cpu.flags.ept, "ept"); add_flag(L, lua_cpu.flags.vpid, "vpid"); /* return number of return values on stack */ return 1; } static const luaL_Reg cpulib[] = { {"flags", cpu_getflags}, {NULL, NULL} }; LUALIB_API int luaopen_cpu(lua_State *L) { luaL_newlib(L, cpulib); return 1; }
gpl-2.0
kykc/m7u-3.4.10-g4dad4ce
drivers/media/video/msm/msm_mem.c
18
11308
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/list.h> #include <linux/ioctl.h> #include <linux/spinlock.h> #include <linux/videodev2.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <media/v4l2-dev.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-device.h> #include <linux/android_pmem.h> #include "msm.h" #ifdef CONFIG_MSM_CAMERA_DEBUG #define D(fmt, args...) pr_debug("msm_isp: " fmt, ##args) #else #define D(fmt, args...) 
do {} while (0) #endif #define PAD_TO_WORD(a) (((a) + 3) & ~3) #define __CONTAINS(r, v, l, field) ({ \ typeof(r) __r = r; \ typeof(v) __v = v; \ typeof(v) __e = __v + l; \ int res = __v >= __r->field && \ __e <= __r->field + __r->len; \ res; \ }) #define CONTAINS(r1, r2, field) ({ \ typeof(r2) __r2 = r2; \ __CONTAINS(r1, __r2->field, __r2->len, field); \ }) #define IN_RANGE(r, v, field) ({ \ typeof(r) __r = r; \ typeof(v) __vv = v; \ int res = ((__vv >= __r->field) && \ (__vv < (__r->field + __r->len))); \ res; \ }) #define OVERLAPS(r1, r2, field) ({ \ typeof(r1) __r1 = r1; \ typeof(r2) __r2 = r2; \ typeof(__r2->field) __v = __r2->field; \ typeof(__v) __e = __v + __r2->len - 1; \ int res = (IN_RANGE(__r1, __v, field) || \ IN_RANGE(__r1, __e, field)); \ res; \ }) static DEFINE_MUTEX(hlist_mut); #ifdef CONFIG_ANDROID_PMEM static int check_pmem_info(struct msm_pmem_info *info, int len) { if (info->offset < len && info->offset + info->len <= len && info->planar0_off < len && info->planar1_off < len) return 0; pr_err("%s: check failed: off %d len %d y %d cbcr %d (total len %d)\n", __func__, info->offset, info->len, info->planar0_off, info->planar1_off, len); return -EINVAL; } #endif static int check_overlap(struct hlist_head *ptype, unsigned long paddr, unsigned long len) { struct msm_pmem_region *region; struct msm_pmem_region t = { .paddr = paddr, .len = len }; struct hlist_node *node; hlist_for_each_entry(region, node, ptype, list) { if (CONTAINS(region, &t, paddr) || CONTAINS(&t, region, paddr) || OVERLAPS(region, &t, paddr)) { CDBG(" region (PHYS %p len %ld)" " clashes with registered region" " (paddr %p len %ld)\n", (void *)t.paddr, t.len, (void *)region->paddr, region->len); return -EINVAL; } } return 0; } static int msm_pmem_table_add(struct hlist_head *ptype, struct msm_pmem_info *info, struct ion_client *client) { unsigned long paddr; #ifndef CONFIG_MSM_MULTIMEDIA_USE_ION unsigned long kvstart; struct file *file; #endif int rc = -ENOMEM; unsigned long len; 
struct msm_pmem_region *region; unsigned long ionflag; void *vaddr; region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL); if (!region) goto out; #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION region->handle = ion_import_dma_buf(client, info->fd); if (IS_ERR_OR_NULL(region->handle)) goto out1; if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL, SZ_4K, 0, &paddr, &len, 0 /* UNCACHED */, 0) < 0) goto out2; rc = ion_handle_get_flags(client, region->handle, &ionflag); if (rc) { pr_err("%s: could not get flags for the handle\n", __func__); return 0; } D("ionflag=%ld\n", ionflag); vaddr = ion_map_kernel(client, region->handle); if (IS_ERR_OR_NULL(vaddr)) { pr_err("%s: could not get virtual address\n", __func__); return 0; } region->vaddr = (unsigned long) vaddr; #elif CONFIG_ANDROID_PMEM rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file); if (rc < 0) { pr_err("%s: get_pmem_file fd %d error %d\n", __func__, info->fd, rc); goto out1; } region->file = file; #else paddr = 0; file = NULL; kvstart = 0; #endif if (!info->len) info->len = len; rc = check_pmem_info(info, len); if (rc < 0) goto out3; paddr += info->offset; len = info->len; if (check_overlap(ptype, paddr, len) < 0) { rc = -EINVAL; goto out3; } CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n", __func__, info->type, info->active, paddr, (unsigned long)info->vaddr); INIT_HLIST_NODE(&region->list); region->paddr = paddr; region->len = len; memcpy(&region->info, info, sizeof(region->info)); D("%s Adding region to list with type %d\n", __func__, region->info.type); D("%s pmem_stats address is 0x%p\n", __func__, ptype); hlist_add_head(&(region->list), ptype); return 0; out3: #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_unmap_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL); #endif #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION out2: ion_free(client, region->handle); #elif CONFIG_ANDROID_PMEM put_pmem_file(region->file); #endif out1: kfree(region); out: return rc; } static int 
__msm_register_pmem(struct hlist_head *ptype, struct msm_pmem_info *pinfo, struct ion_client *client) { int rc = 0; switch (pinfo->type) { case MSM_PMEM_AF: case MSM_PMEM_AEC: case MSM_PMEM_AWB: case MSM_PMEM_RS: case MSM_PMEM_CS: case MSM_PMEM_IHIST: case MSM_PMEM_SKIN: case MSM_PMEM_AEC_AWB: //QCT - BAYER STATS - MB case MSM_PMEM_BAYER_GRID: case MSM_PMEM_BAYER_FOCUS: case MSM_PMEM_BAYER_HIST: //QCT - BAYER STATS - MB rc = msm_pmem_table_add(ptype, pinfo, client); break; default: rc = -EINVAL; break; } return rc; } static int __msm_pmem_table_del(struct hlist_head *ptype, struct msm_pmem_info *pinfo, struct ion_client *client) { int rc = 0; struct msm_pmem_region *region; struct hlist_node *node, *n; switch (pinfo->type) { case MSM_PMEM_AF: case MSM_PMEM_AEC: case MSM_PMEM_AWB: case MSM_PMEM_RS: case MSM_PMEM_CS: case MSM_PMEM_IHIST: case MSM_PMEM_SKIN: case MSM_PMEM_AEC_AWB: //QCT - BAYER STATS - MB case MSM_PMEM_BAYER_GRID: case MSM_PMEM_BAYER_FOCUS: case MSM_PMEM_BAYER_HIST: //QCT - BAYER STATS - ME hlist_for_each_entry_safe(region, node, n, ptype, list) { if (pinfo->type == region->info.type && pinfo->vaddr == region->info.vaddr && pinfo->fd == region->info.fd) { hlist_del(node); #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION ion_unmap_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL); ion_free(client, region->handle); #else put_pmem_file(region->file); #endif kfree(region); } } break; default: rc = -EINVAL; break; } return rc; } /* return of 0 means failure */ uint8_t msm_pmem_region_lookup(struct hlist_head *ptype, int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount) { struct msm_pmem_region *region; struct msm_pmem_region *regptr; struct hlist_node *node, *n; uint8_t rc = 0; D("%s\n", __func__); regptr = reg; mutex_lock(&hlist_mut); hlist_for_each_entry_safe(region, node, n, ptype, list) { if (region->info.type == pmem_type && region->info.active) { *regptr = *region; rc += 1; if (rc >= maxcount) break; regptr++; } } D("%s finished, rc=%d\n", __func__, 
rc); mutex_unlock(&hlist_mut); return rc; } int msm_pmem_region_get_phy_addr(struct hlist_head *ptype, struct msm_mem_map_info *mem_map, int32_t *phyaddr) { struct msm_pmem_region *region; struct hlist_node *node, *n; int pmem_type = mem_map->mem_type; int rc = -EFAULT; D("%s\n", __func__); *phyaddr = 0; mutex_lock(&hlist_mut); hlist_for_each_entry_safe(region, node, n, ptype, list) { if (region->info.type == pmem_type && (uint32_t)region->info.vaddr == mem_map->cookie) { *phyaddr = (int32_t)region->paddr; rc = 0; break; } } D("%s finished, phy_addr = 0x%x, rc=%d\n", __func__, *phyaddr, rc); mutex_unlock(&hlist_mut); return rc; } uint8_t msm_pmem_region_lookup_2(struct hlist_head *ptype, int pmem_type, struct msm_pmem_region *reg, uint8_t maxcount) { struct msm_pmem_region *region; struct msm_pmem_region *regptr; struct hlist_node *node, *n; uint8_t rc = 0; regptr = reg; mutex_lock(&hlist_mut); hlist_for_each_entry_safe(region, node, n, ptype, list) { D("Mio: info.type=%d, pmem_type = %d," "info.active = %d\n", region->info.type, pmem_type, region->info.active); if (region->info.type == pmem_type && region->info.active) { D("info.type=%d, pmem_type = %d," "info.active = %d,\n", region->info.type, pmem_type, region->info.active); *regptr = *region; region->info.type = MSM_PMEM_VIDEO; rc += 1; if (rc >= maxcount) break; regptr++; } } mutex_unlock(&hlist_mut); return rc; } unsigned long msm_pmem_stats_vtop_lookup( struct msm_cam_media_controller *mctl, unsigned long buffer, int fd) { struct msm_pmem_region *region; struct hlist_node *node, *n; hlist_for_each_entry_safe(region, node, n, &mctl->stats_info.pmem_stats_list, list) { if (((unsigned long)(region->info.vaddr) == buffer) && (region->info.fd == fd) && region->info.active == 0) { region->info.active = 1; return region->paddr; } } return 0; } unsigned long msm_pmem_stats_ptov_lookup( struct msm_cam_media_controller *mctl, unsigned long addr, int *fd) { struct msm_pmem_region *region; struct hlist_node *node, *n; 
hlist_for_each_entry_safe(region, node, n, &mctl->stats_info.pmem_stats_list, list) { if (addr == region->paddr && region->info.active) { /* offset since we could pass vaddr inside a * registered pmem buffer */ *fd = region->info.fd; region->info.active = 0; return (unsigned long)(region->info.vaddr); } } if (addr != 0) pr_err("%s: abnormal addr == 0X%x\n", __func__, (uint32_t)addr); return 0; } unsigned long msm_pmem_stats_ptov_lookup_2( struct msm_cam_media_controller *mctl, unsigned long addr, int *fd) { struct msm_pmem_region *region; struct hlist_node *node, *n; hlist_for_each_entry_safe(region, node, n, &mctl->stats_info.pmem_stats_list, list) { if (addr == region->paddr) { /* offset since we could pass vaddr inside a * registered pmem buffer */ *fd = region->info.fd; return (unsigned long)(region->vaddr); } } if (addr != 0) pr_err("%s: abnormal addr == 0X%x\n", __func__, (uint32_t)addr); return 0; } int msm_register_pmem(struct hlist_head *ptype, void __user *arg, struct ion_client *client) { struct msm_pmem_info info; if (copy_from_user(&info, arg, sizeof(info))) { ERR_COPY_FROM_USER(); return -EFAULT; } return __msm_register_pmem(ptype, &info, client); } EXPORT_SYMBOL(msm_register_pmem); int msm_pmem_table_del(struct hlist_head *ptype, void __user *arg, struct ion_client *client) { struct msm_pmem_info info; if (copy_from_user(&info, arg, sizeof(info))) { ERR_COPY_FROM_USER(); return -EFAULT; } return __msm_pmem_table_del(ptype, &info, client); } EXPORT_SYMBOL(msm_pmem_table_del);
gpl-2.0
pcacjr/syslinux
core/fs/ufs/bmap.c
18
5632
/* * Copyright (C) 2013 Raphael S. Carvalho <raphael.scarv@gmail.com> * * Partially taken from fs/ext2/bmap.c * This file was modified according UFS1/2 needs. * * Copyright (C) 2009 Liu Aleaxander -- All rights reserved. This file * may be redistributed under the terms of the GNU Public License. */ #include <stdio.h> #include <dprintf.h> #include <fs.h> #include <disk.h> #include <cache.h> #include "ufs.h" /* * Copy blk address into buffer, this was needed since UFS1/2 addr size * in blk maps differs from each other (32/64 bits respectivelly). */ static inline uint64_t get_blkaddr (const uint8_t *blk, uint32_t index, uint32_t shift) { uint64_t addr = 0; memcpy((uint8_t *) &addr, (uint8_t *) blk + (index << shift), 1 << shift); return addr; } /* * Scan forward in a range of blocks to see if they are contiguous, * then return the initial value. */ static uint64_t scan_set_nblocks(const uint8_t *map, uint32_t index, uint32_t addr_shift, unsigned int count, size_t *nblocks) { uint64_t addr; uint64_t blk = get_blkaddr(map, index, addr_shift); /* * Block spans 8 fragments, then address is interleaved by 8. * This code works for either 32/64 sized addresses. */ if (nblocks) { uint32_t skip = blk ? 
FRAGMENTS_PER_BLK : 0; uint32_t next = blk + skip; size_t cnt = 1; /* Get address of starting blk pointer */ map += (index << addr_shift); ufs_debug("[scan] start blk: %u\n", blk); ufs_debug("[scan] count (nr of blks): %u\n", count); /* Go up to the end of blk map */ while (--count) { map += 1 << addr_shift; addr = get_blkaddr(map, 0, addr_shift); #if 0 /* Extra debugging info (Too much prints) */ ufs_debug("[scan] addr: %u next: %u\n", addr, next); #endif if (addr == next) { cnt++; next += skip; } else { break; } } *nblocks = cnt; ufs_debug("[scan] nblocks: %u\n", cnt); ufs_debug("[scan] end blk: %u\n", next - FRAGMENTS_PER_BLK); } return blk; } /* * The actual indirect block map handling - the block passed in should * be relative to the beginning of the particular block hierarchy. * * @shft_per_blk: shift to get nr. of addresses in a block. * @mask_per_blk: mask to limit the max nr. of addresses in a block. * @addr_count: nr. of addresses in a block. */ static uint64_t bmap_indirect(struct fs_info *fs, uint64_t start, uint32_t block, int levels, size_t *nblocks) { uint32_t shft_per_blk = fs->block_shift - UFS_SB(fs)->addr_shift; uint32_t addr_count = (1 << shft_per_blk); uint32_t mask_per_blk = addr_count - 1; const uint8_t *blk = NULL; uint32_t index = 0; while (levels--) { if (!start) { if (nblocks) *nblocks = addr_count << (levels * shft_per_blk); return 0; } blk = get_cache(fs->fs_dev, frag_to_blk(fs, start)); index = (block >> (levels * shft_per_blk)) & mask_per_blk; start = get_blkaddr(blk, index, UFS_SB(fs)->addr_shift); } return scan_set_nblocks(blk, index, UFS_SB(fs)->addr_shift, addr_count - index, nblocks); } /* * Handle the traditional block map, like indirect, double indirect * and triple indirect */ uint64_t ufs_bmap (struct inode *inode, block_t block, size_t *nblocks) { uint32_t shft_per_blk, ptrs_per_blk; static uint32_t indir_blks, double_blks, triple_blks; struct fs_info *fs = inode->fs; /* Initialize static values */ if (!indir_blks) { 
shft_per_blk = fs->block_shift - UFS_SB(fs)->addr_shift; ptrs_per_blk = fs->block_size >> UFS_SB(fs)->addr_shift; indir_blks = ptrs_per_blk; double_blks = ptrs_per_blk << shft_per_blk; triple_blks = double_blks << shft_per_blk; } /* * direct blocks * (UFS2_ADDR_SHIFT) is also used for UFS1 because its direct ptr array * was extended to 64 bits. */ if (block < UFS_DIRECT_BLOCKS) return scan_set_nblocks((uint8_t *) PVT(inode)->direct_blk_ptr, block, UFS2_ADDR_SHIFT, UFS_DIRECT_BLOCKS - block, nblocks); /* indirect blocks */ block -= UFS_DIRECT_BLOCKS; if (block < indir_blks) return bmap_indirect(fs, PVT(inode)->indirect_blk_ptr, block, 1, nblocks); /* double indirect blocks */ block -= indir_blks; if (block < double_blks) return bmap_indirect(fs, PVT(inode)->double_indirect_blk_ptr, block, 2, nblocks); /* triple indirect blocks */ block -= double_blks; if (block < triple_blks) return bmap_indirect(fs, PVT(inode)->triple_indirect_blk_ptr, block, 3, nblocks); /* This can't happen... */ return 0; } /* * Next extent for getfssec * "Remaining sectors" means (lstart & blkmask). */ int ufs_next_extent(struct inode *inode, uint32_t lstart) { struct fs_info *fs = inode->fs; int blktosec = BLOCK_SHIFT(fs) - SECTOR_SHIFT(fs); int frag_shift = BLOCK_SHIFT(fs) - UFS_SB(fs)->c_blk_frag_shift; int blkmask = (1 << blktosec) - 1; block_t block; size_t nblocks = 0; ufs_debug("ufs_next_extent:\n"); block = ufs_bmap(inode, lstart >> blktosec, &nblocks); ufs_debug("blk: %u\n", block); if (!block) // Sparse block inode->next_extent.pstart = EXTENT_ZERO; else /* * Convert blk into sect addr and add the remaining * sectors into pstart (sector start address). */ inode->next_extent.pstart = ((sector_t) (block << (frag_shift - SECTOR_SHIFT(fs)))) | (lstart & blkmask); /* * Subtract the remaining sectors from len since these sectors * were added to pstart (sector start address). */ inode->next_extent.len = (nblocks << blktosec) - (lstart & blkmask); return 0; }
gpl-2.0
omeid/binutils-gdb
gdb/i386-sol2-tdep.c
18
5031
/* Target-dependent code for Solaris x86.

   Copyright (C) 2002-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "value.h"
#include "osabi.h"

#include "sol2-tdep.h"
#include "i386-tdep.h"
#include "solib-svr4.h"

/* Offset (in bytes) of each i386 register within the Solaris
   gregset_t.  Taken from <ia32/sys/reg.h>.  */

static int i386_sol2_gregset_reg_offset[] =
{
  11 * 4,			/* %eax */
  10 * 4,			/* %ecx */
  9 * 4,			/* %edx */
  8 * 4,			/* %ebx */
  17 * 4,			/* %esp */
  6 * 4,			/* %ebp */
  5 * 4,			/* %esi */
  4 * 4,			/* %edi */
  14 * 4,			/* %eip */
  16 * 4,			/* %eflags */
  15 * 4,			/* %cs */
  18 * 4,			/* %ss */
  3 * 4,			/* %ds */
  2 * 4,			/* %es */
  1 * 4,			/* %fs */
  0 * 4				/* %gs */
};

/* Return whether THIS_FRAME corresponds to a Solaris sigtramp
   routine.  Solaris dispatches signals through either
   "sigacthandler" or the BSD-compatible "ucbsigvechandler".  */

static int
i386_sol2_sigtramp_p (struct frame_info *this_frame)
{
  CORE_ADDR pc = get_frame_pc (this_frame);
  const char *name;

  find_pc_partial_function (pc, &name, NULL, NULL);
  if (name == NULL)
    return 0;

  return (strcmp (name, "sigacthandler") == 0
	  || strcmp (name, "ucbsigvechandler") == 0);
}

/* Solaris doesn't have a `struct sigcontext', but it does have a
   `mcontext_t' that contains the saved set of machine registers.  */

static CORE_ADDR
i386_sol2_mcontext_addr (struct frame_info *this_frame)
{
  CORE_ADDR sp, ucontext_addr;

  /* The ucontext_t pointer is the third argument of the signal
     handler; the register block lives 36 bytes into it.  */
  sp = get_frame_register_unsigned (this_frame, I386_ESP_REGNUM);
  ucontext_addr = get_frame_memory_unsigned (this_frame, sp + 8, 4);

  return ucontext_addr + 36;
}

/* SunPRO encodes the static variables.  This is not related to C++
   mangling, it is done for C too.  */

static const char *
i386_sol2_static_transform_name (const char *name)
{
  const char *last_dot;

  /* For file-local statics there will be a period, a bunch of junk
     (the contents of which match a string given in the N_OPT), a
     period and the name.  For function-local statics there will be a
     bunch of junk (which seems to change the second character from
     'A' to 'B'), a period, the name of the function, and the name.
     So just skip everything before the last period.  */
  if (name[0] != '.')
    return name;

  last_dot = strrchr (name, '.');
  return last_dot != NULL ? last_dot + 1 : name;
}

/* Solaris 2.  */

static void
i386_sol2_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Solaris is SVR4-based.  */
  i386_svr4_init_abi (info, gdbarch);

  /* The SunPRO compiler puts out 0 instead of the address in N_SO symbols,
     and for SunPRO 3.0, N_FUN symbols too.  */
  set_gdbarch_sofun_address_maybe_missing (gdbarch, 1);

  /* Handle SunPRO encoding of static symbols.  */
  set_gdbarch_static_transform_name (gdbarch,
				     i386_sol2_static_transform_name);

  /* Solaris reserves space for its FPU emulator in `fpregset_t'.
     There is also some space reserved for the registers of a Weitek
     math coprocessor.  */
  tdep->gregset_reg_offset = i386_sol2_gregset_reg_offset;
  tdep->gregset_num_regs = ARRAY_SIZE (i386_sol2_gregset_reg_offset);
  tdep->sizeof_gregset = 19 * 4;
  tdep->sizeof_fpregset = 380;

  /* Signal trampolines are slightly different from SVR4.  */
  tdep->sigtramp_p = i386_sol2_sigtramp_p;
  tdep->sigcontext_addr = i386_sol2_mcontext_addr;
  tdep->sc_reg_offset = tdep->gregset_reg_offset;
  tdep->sc_num_regs = tdep->gregset_num_regs;

  /* Solaris has SVR4-style shared libraries.  */
  set_gdbarch_skip_solib_resolver (gdbarch, sol2_skip_solib_resolver);
  set_solib_svr4_fetch_link_map_offsets
    (gdbarch, svr4_ilp32_fetch_link_map_offsets);

  /* How to print LWP PTIDs from core files.  */
  set_gdbarch_core_pid_to_str (gdbarch, sol2_core_pid_to_str);
}

/* OS ABI sniffer: classify BFDs that look like Solaris 2 ELF
   binaries.  */

static enum gdb_osabi
i386_sol2_osabi_sniffer (bfd *abfd)
{
  /* If we have a section named .SUNW_version, then it is almost
     certainly Solaris 2.  */
  if (bfd_get_section_by_name (abfd, ".SUNW_version"))
    return GDB_OSABI_SOLARIS;

  return GDB_OSABI_UNKNOWN;
}

/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_i386_sol2_tdep (void);

void
_initialize_i386_sol2_tdep (void)
{
  /* Register an ELF OS ABI sniffer for Solaris 2 binaries.  */
  gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_elf_flavour,
				  i386_sol2_osabi_sniffer);

  gdbarch_register_osabi (bfd_arch_i386, 0, GDB_OSABI_SOLARIS,
			  i386_sol2_init_abi);
}
gpl-2.0
weimenlove/linux-2.6.24.4
drivers/leds/leds-gpio.c
18
4535
/* * LEDs driver for GPIOs * * Copyright (C) 2007 8D Technologies inc. * Raphael Assenat <raph@8d.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/workqueue.h> #include <asm/gpio.h> struct gpio_led_data { struct led_classdev cdev; unsigned gpio; struct work_struct work; u8 new_level; u8 can_sleep; u8 active_low; }; static void gpio_led_work(struct work_struct *work) { struct gpio_led_data *led_dat = container_of(work, struct gpio_led_data, work); gpio_set_value_cansleep(led_dat->gpio, led_dat->new_level); } static void gpio_led_set(struct led_classdev *led_cdev, enum led_brightness value) { struct gpio_led_data *led_dat = container_of(led_cdev, struct gpio_led_data, cdev); int level; if (value == LED_OFF) level = 0; else level = 1; if (led_dat->active_low) level = !level; /* setting GPIOs with I2C/etc requires a preemptible task context */ if (led_dat->can_sleep) { if (preempt_count()) { led_dat->new_level = level; schedule_work(&led_dat->work); } else gpio_set_value_cansleep(led_dat->gpio, level); } else gpio_set_value(led_dat->gpio, level); } static int gpio_led_probe(struct platform_device *pdev) { struct gpio_led_platform_data *pdata = pdev->dev.platform_data; struct gpio_led *cur_led; struct gpio_led_data *leds_data, *led_dat; int i, ret = 0; if (!pdata) return -EBUSY; leds_data = kzalloc(sizeof(struct gpio_led_data) * pdata->num_leds, GFP_KERNEL); if (!leds_data) return -ENOMEM; for (i = 0; i < pdata->num_leds; i++) { cur_led = &pdata->leds[i]; led_dat = &leds_data[i]; led_dat->cdev.name = cur_led->name; led_dat->cdev.default_trigger = cur_led->default_trigger; led_dat->gpio = cur_led->gpio; led_dat->can_sleep = gpio_cansleep(cur_led->gpio); led_dat->active_low = cur_led->active_low; 
led_dat->cdev.brightness_set = gpio_led_set; led_dat->cdev.brightness = cur_led->active_low ? LED_FULL : LED_OFF; ret = gpio_request(led_dat->gpio, led_dat->cdev.name); if (ret < 0) goto err; gpio_direction_output(led_dat->gpio, led_dat->active_low); INIT_WORK(&led_dat->work, gpio_led_work); ret = led_classdev_register(&pdev->dev, &led_dat->cdev); if (ret < 0) { gpio_free(led_dat->gpio); goto err; } } platform_set_drvdata(pdev, leds_data); return 0; err: if (i > 0) { for (i = i - 1; i >= 0; i--) { led_classdev_unregister(&leds_data[i].cdev); cancel_work_sync(&leds_data[i].work); gpio_free(leds_data[i].gpio); } } kfree(leds_data); return ret; } static int __devexit gpio_led_remove(struct platform_device *pdev) { int i; struct gpio_led_platform_data *pdata = pdev->dev.platform_data; struct gpio_led_data *leds_data; leds_data = platform_get_drvdata(pdev); for (i = 0; i < pdata->num_leds; i++) { led_classdev_unregister(&leds_data[i].cdev); cancel_work_sync(&leds_data[i].work); gpio_free(leds_data[i].gpio); } kfree(leds_data); return 0; } #ifdef CONFIG_PM static int gpio_led_suspend(struct platform_device *pdev, pm_message_t state) { struct gpio_led_platform_data *pdata = pdev->dev.platform_data; struct gpio_led_data *leds_data; int i; leds_data = platform_get_drvdata(pdev); for (i = 0; i < pdata->num_leds; i++) led_classdev_suspend(&leds_data[i].cdev); return 0; } static int gpio_led_resume(struct platform_device *pdev) { struct gpio_led_platform_data *pdata = pdev->dev.platform_data; struct gpio_led_data *leds_data; int i; leds_data = platform_get_drvdata(pdev); for (i = 0; i < pdata->num_leds; i++) led_classdev_resume(&leds_data[i].cdev); return 0; } #else #define gpio_led_suspend NULL #define gpio_led_resume NULL #endif static struct platform_driver gpio_led_driver = { .probe = gpio_led_probe, .remove = __devexit_p(gpio_led_remove), .suspend = gpio_led_suspend, .resume = gpio_led_resume, .driver = { .name = "leds-gpio", .owner = THIS_MODULE, }, }; static int __init 
gpio_led_init(void) { return platform_driver_register(&gpio_led_driver); } static void __exit gpio_led_exit(void) { platform_driver_unregister(&gpio_led_driver); } module_init(gpio_led_init); module_exit(gpio_led_exit); MODULE_AUTHOR("Raphael Assenat <raph@8d.com>"); MODULE_DESCRIPTION("GPIO LED driver"); MODULE_LICENSE("GPL");
gpl-2.0
nuxeh/u-boot
drivers/usb/gadget/f_dfu.c
18
17330
/* * f_dfu.c -- Device Firmware Update USB function * * Copyright (C) 2012 Samsung Electronics * authors: Andrzej Pietrasiewicz <andrzej.p@samsung.com> * Lukasz Majewski <l.majewski@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <errno.h> #include <common.h> #include <malloc.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/composite.h> #include <dfu.h> #include "f_dfu.h" struct f_dfu { struct usb_function usb_function; struct usb_descriptor_header **function; struct usb_string *strings; /* when configured, we have one config */ u8 config; u8 altsetting; enum dfu_state dfu_state; unsigned int dfu_status; /* Send/received block number is handy for data integrity check */ int blk_seq_num; }; typedef int (*dfu_state_fn) (struct f_dfu *, const struct usb_ctrlrequest *, struct usb_gadget *, struct usb_request *); static inline struct f_dfu *func_to_dfu(struct usb_function *f) { return container_of(f, struct f_dfu, usb_function); } static const struct dfu_function_descriptor dfu_func = { .bLength = sizeof dfu_func, .bDescriptorType = DFU_DT_FUNC, .bmAttributes = DFU_BIT_WILL_DETACH | DFU_BIT_MANIFESTATION_TOLERANT | DFU_BIT_CAN_UPLOAD | DFU_BIT_CAN_DNLOAD, .wDetachTimeOut = 0, .wTransferSize = DFU_USB_BUFSIZ, .bcdDFUVersion = __constant_cpu_to_le16(0x0110), }; static struct 
usb_interface_descriptor dfu_intf_runtime = { .bLength = sizeof dfu_intf_runtime, .bDescriptorType = USB_DT_INTERFACE, .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_APP_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 1, /* .iInterface = DYNAMIC */ }; static struct usb_descriptor_header *dfu_runtime_descs[] = { (struct usb_descriptor_header *) &dfu_intf_runtime, NULL, }; static const struct usb_qualifier_descriptor dev_qualifier = { .bLength = sizeof dev_qualifier, .bDescriptorType = USB_DT_DEVICE_QUALIFIER, .bcdUSB = __constant_cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_VENDOR_SPEC, .bNumConfigurations = 1, }; static const char dfu_name[] = "Device Firmware Upgrade"; /* * static strings, in UTF-8 * * dfu_generic configuration */ static struct usb_string strings_dfu_generic[] = { [0].s = dfu_name, { } /* end of list */ }; static struct usb_gadget_strings stringtab_dfu_generic = { .language = 0x0409, /* en-us */ .strings = strings_dfu_generic, }; static struct usb_gadget_strings *dfu_generic_strings[] = { &stringtab_dfu_generic, NULL, }; /* * usb_function specific */ static struct usb_gadget_strings stringtab_dfu = { .language = 0x0409, /* en-us */ /* * .strings * * assigned during initialization, * depends on number of flash entities * */ }; static struct usb_gadget_strings *dfu_strings[] = { &stringtab_dfu, NULL, }; /*-------------------------------------------------------------------------*/ static void dnload_request_complete(struct usb_ep *ep, struct usb_request *req) { struct f_dfu *f_dfu = req->context; dfu_write(dfu_get_entity(f_dfu->altsetting), req->buf, req->length, f_dfu->blk_seq_num); if (req->length == 0) puts("DOWNLOAD ... 
OK\nCtrl+C to exit ...\n"); } static void handle_getstatus(struct usb_request *req) { struct dfu_status *dstat = (struct dfu_status *)req->buf; struct f_dfu *f_dfu = req->context; switch (f_dfu->dfu_state) { case DFU_STATE_dfuDNLOAD_SYNC: case DFU_STATE_dfuDNBUSY: f_dfu->dfu_state = DFU_STATE_dfuDNLOAD_IDLE; break; case DFU_STATE_dfuMANIFEST_SYNC: break; default: break; } /* send status response */ dstat->bStatus = f_dfu->dfu_status; dstat->bState = f_dfu->dfu_state; dstat->iString = 0; } static void handle_getstate(struct usb_request *req) { struct f_dfu *f_dfu = req->context; ((u8 *)req->buf)[0] = f_dfu->dfu_state; req->actual = sizeof(u8); } static inline void to_dfu_mode(struct f_dfu *f_dfu) { f_dfu->usb_function.strings = dfu_strings; f_dfu->usb_function.hs_descriptors = f_dfu->function; } static inline void to_runtime_mode(struct f_dfu *f_dfu) { f_dfu->usb_function.strings = NULL; f_dfu->usb_function.hs_descriptors = dfu_runtime_descs; } static int handle_upload(struct usb_request *req, u16 len) { struct f_dfu *f_dfu = req->context; return dfu_read(dfu_get_entity(f_dfu->altsetting), req->buf, req->length, f_dfu->blk_seq_num); } static int handle_dnload(struct usb_gadget *gadget, u16 len) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_request *req = cdev->req; struct f_dfu *f_dfu = req->context; if (len == 0) f_dfu->dfu_state = DFU_STATE_dfuMANIFEST_SYNC; req->complete = dnload_request_complete; return len; } /*-------------------------------------------------------------------------*/ /* DFU state machine */ static int state_app_idle(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget *gadget, struct usb_request *req) { int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_GETSTATUS: handle_getstatus(req); value = RET_STAT_LEN; break; case USB_REQ_DFU_GETSTATE: handle_getstate(req); break; case USB_REQ_DFU_DETACH: f_dfu->dfu_state = DFU_STATE_appDETACH; to_dfu_mode(f_dfu); f_dfu->dfu_state = 
DFU_STATE_dfuIDLE; value = RET_ZLP; break; default: value = RET_STALL; break; } return value; } static int state_app_detach(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget *gadget, struct usb_request *req) { int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_GETSTATUS: handle_getstatus(req); value = RET_STAT_LEN; break; case USB_REQ_DFU_GETSTATE: handle_getstate(req); break; default: f_dfu->dfu_state = DFU_STATE_appIDLE; value = RET_STALL; break; } return value; } static int state_dfu_idle(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget *gadget, struct usb_request *req) { u16 w_value = le16_to_cpu(ctrl->wValue); u16 len = le16_to_cpu(ctrl->wLength); int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_DNLOAD: if (len == 0) { f_dfu->dfu_state = DFU_STATE_dfuERROR; value = RET_STALL; break; } f_dfu->dfu_state = DFU_STATE_dfuDNLOAD_SYNC; f_dfu->blk_seq_num = w_value; value = handle_dnload(gadget, len); break; case USB_REQ_DFU_UPLOAD: f_dfu->dfu_state = DFU_STATE_dfuUPLOAD_IDLE; f_dfu->blk_seq_num = 0; value = handle_upload(req, len); break; case USB_REQ_DFU_ABORT: /* no zlp? */ value = RET_ZLP; break; case USB_REQ_DFU_GETSTATUS: handle_getstatus(req); value = RET_STAT_LEN; break; case USB_REQ_DFU_GETSTATE: handle_getstate(req); break; case USB_REQ_DFU_DETACH: /* * Proprietary extension: 'detach' from idle mode and * get back to runtime mode in case of USB Reset. 
As * much as I dislike this, we just can't use every USB * bus reset to switch back to runtime mode, since at * least the Linux USB stack likes to send a number of * resets in a row :( */ f_dfu->dfu_state = DFU_STATE_dfuMANIFEST_WAIT_RST; to_runtime_mode(f_dfu); f_dfu->dfu_state = DFU_STATE_appIDLE; break; default: f_dfu->dfu_state = DFU_STATE_dfuERROR; value = RET_STALL; break; } return value; } static int state_dfu_dnload_sync(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget *gadget, struct usb_request *req) { int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_GETSTATUS: handle_getstatus(req); value = RET_STAT_LEN; break; case USB_REQ_DFU_GETSTATE: handle_getstate(req); break; default: f_dfu->dfu_state = DFU_STATE_dfuERROR; value = RET_STALL; break; } return value; } static int state_dfu_dnbusy(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget *gadget, struct usb_request *req) { int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_GETSTATUS: handle_getstatus(req); value = RET_STAT_LEN; break; default: f_dfu->dfu_state = DFU_STATE_dfuERROR; value = RET_STALL; break; } return value; } static int state_dfu_dnload_idle(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget *gadget, struct usb_request *req) { u16 w_value = le16_to_cpu(ctrl->wValue); u16 len = le16_to_cpu(ctrl->wLength); int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_DNLOAD: f_dfu->dfu_state = DFU_STATE_dfuDNLOAD_SYNC; f_dfu->blk_seq_num = w_value; value = handle_dnload(gadget, len); break; case USB_REQ_DFU_ABORT: f_dfu->dfu_state = DFU_STATE_dfuIDLE; value = RET_ZLP; break; case USB_REQ_DFU_GETSTATUS: handle_getstatus(req); value = RET_STAT_LEN; break; case USB_REQ_DFU_GETSTATE: handle_getstate(req); break; default: f_dfu->dfu_state = DFU_STATE_dfuERROR; value = RET_STALL; break; } return value; } static int state_dfu_manifest_sync(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget 
*gadget, struct usb_request *req) { int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_GETSTATUS: /* We're MainfestationTolerant */ f_dfu->dfu_state = DFU_STATE_dfuIDLE; handle_getstatus(req); f_dfu->blk_seq_num = 0; value = RET_STAT_LEN; break; case USB_REQ_DFU_GETSTATE: handle_getstate(req); break; default: f_dfu->dfu_state = DFU_STATE_dfuERROR; value = RET_STALL; break; } return value; } static int state_dfu_upload_idle(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget *gadget, struct usb_request *req) { u16 w_value = le16_to_cpu(ctrl->wValue); u16 len = le16_to_cpu(ctrl->wLength); int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_UPLOAD: /* state transition if less data then requested */ f_dfu->blk_seq_num = w_value; value = handle_upload(req, len); if (value >= 0 && value < len) f_dfu->dfu_state = DFU_STATE_dfuIDLE; break; case USB_REQ_DFU_ABORT: f_dfu->dfu_state = DFU_STATE_dfuIDLE; /* no zlp? */ value = RET_ZLP; break; case USB_REQ_DFU_GETSTATUS: handle_getstatus(req); value = RET_STAT_LEN; break; case USB_REQ_DFU_GETSTATE: handle_getstate(req); break; default: f_dfu->dfu_state = DFU_STATE_dfuERROR; value = RET_STALL; break; } return value; } static int state_dfu_error(struct f_dfu *f_dfu, const struct usb_ctrlrequest *ctrl, struct usb_gadget *gadget, struct usb_request *req) { int value = 0; switch (ctrl->bRequest) { case USB_REQ_DFU_GETSTATUS: handle_getstatus(req); value = RET_STAT_LEN; break; case USB_REQ_DFU_GETSTATE: handle_getstate(req); break; case USB_REQ_DFU_CLRSTATUS: f_dfu->dfu_state = DFU_STATE_dfuIDLE; f_dfu->dfu_status = DFU_STATUS_OK; /* no zlp? 
*/ value = RET_ZLP; break; default: f_dfu->dfu_state = DFU_STATE_dfuERROR; value = RET_STALL; break; } return value; } static dfu_state_fn dfu_state[] = { state_app_idle, /* DFU_STATE_appIDLE */ state_app_detach, /* DFU_STATE_appDETACH */ state_dfu_idle, /* DFU_STATE_dfuIDLE */ state_dfu_dnload_sync, /* DFU_STATE_dfuDNLOAD_SYNC */ state_dfu_dnbusy, /* DFU_STATE_dfuDNBUSY */ state_dfu_dnload_idle, /* DFU_STATE_dfuDNLOAD_IDLE */ state_dfu_manifest_sync, /* DFU_STATE_dfuMANIFEST_SYNC */ NULL, /* DFU_STATE_dfuMANIFEST */ NULL, /* DFU_STATE_dfuMANIFEST_WAIT_RST */ state_dfu_upload_idle, /* DFU_STATE_dfuUPLOAD_IDLE */ state_dfu_error /* DFU_STATE_dfuERROR */ }; static int dfu_handle(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct usb_gadget *gadget = f->config->cdev->gadget; struct usb_request *req = f->config->cdev->req; struct f_dfu *f_dfu = f->config->cdev->req->context; u16 len = le16_to_cpu(ctrl->wLength); u16 w_value = le16_to_cpu(ctrl->wValue); int value = 0; u8 req_type = ctrl->bRequestType & USB_TYPE_MASK; debug("w_value: 0x%x len: 0x%x\n", w_value, len); debug("req_type: 0x%x ctrl->bRequest: 0x%x f_dfu->dfu_state: 0x%x\n", req_type, ctrl->bRequest, f_dfu->dfu_state); if (req_type == USB_TYPE_STANDARD) { if (ctrl->bRequest == USB_REQ_GET_DESCRIPTOR && (w_value >> 8) == DFU_DT_FUNC) { value = min(len, (u16) sizeof(dfu_func)); memcpy(req->buf, &dfu_func, value); } } else /* DFU specific request */ value = dfu_state[f_dfu->dfu_state] (f_dfu, ctrl, gadget, req); if (value >= 0) { req->length = value; req->zero = value < len; value = usb_ep_queue(gadget->ep0, req, 0); if (value < 0) { debug("ep_queue --> %d\n", value); req->status = 0; } } return value; } /*-------------------------------------------------------------------------*/ static int dfu_prepare_strings(struct f_dfu *f_dfu, int n) { struct dfu_entity *de = NULL; int i = 0; f_dfu->strings = calloc(sizeof(struct usb_string), n + 1); if (!f_dfu->strings) goto enomem; for (i = 0; i < n; ++i) 
{ de = dfu_get_entity(i); f_dfu->strings[i].s = de->name; } f_dfu->strings[i].id = 0; f_dfu->strings[i].s = NULL; return 0; enomem: while (i) f_dfu->strings[--i].s = NULL; free(f_dfu->strings); return -ENOMEM; } static int dfu_prepare_function(struct f_dfu *f_dfu, int n) { struct usb_interface_descriptor *d; int i = 0; f_dfu->function = calloc(sizeof(struct usb_descriptor_header *), n); if (!f_dfu->function) goto enomem; for (i = 0; i < n; ++i) { d = calloc(sizeof(*d), 1); if (!d) goto enomem; d->bLength = sizeof(*d); d->bDescriptorType = USB_DT_INTERFACE; d->bAlternateSetting = i; d->bNumEndpoints = 0; d->bInterfaceClass = USB_CLASS_APP_SPEC; d->bInterfaceSubClass = 1; d->bInterfaceProtocol = 2; f_dfu->function[i] = (struct usb_descriptor_header *)d; } f_dfu->function[i] = NULL; return 0; enomem: while (i) { free(f_dfu->function[--i]); f_dfu->function[i] = NULL; } free(f_dfu->function); return -ENOMEM; } static int dfu_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_dfu *f_dfu = func_to_dfu(f); int alt_num = dfu_get_alt_number(); int rv, id, i; id = usb_interface_id(c, f); if (id < 0) return id; dfu_intf_runtime.bInterfaceNumber = id; f_dfu->dfu_state = DFU_STATE_appIDLE; f_dfu->dfu_status = DFU_STATUS_OK; rv = dfu_prepare_function(f_dfu, alt_num); if (rv) goto error; rv = dfu_prepare_strings(f_dfu, alt_num); if (rv) goto error; for (i = 0; i < alt_num; i++) { id = usb_string_id(cdev); if (id < 0) return id; f_dfu->strings[i].id = id; ((struct usb_interface_descriptor *)f_dfu->function[i]) ->iInterface = id; } stringtab_dfu.strings = f_dfu->strings; cdev->req->context = f_dfu; error: return rv; } static void dfu_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_dfu *f_dfu = func_to_dfu(f); int alt_num = dfu_get_alt_number(); int i; if (f_dfu->strings) { i = alt_num; while (i) f_dfu->strings[--i].s = NULL; free(f_dfu->strings); } if (f_dfu->function) { i = alt_num; while (i) { 
free(f_dfu->function[--i]); f_dfu->function[i] = NULL; } free(f_dfu->function); } free(f_dfu); } static int dfu_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_dfu *f_dfu = func_to_dfu(f); debug("%s: intf:%d alt:%d\n", __func__, intf, alt); f_dfu->altsetting = alt; return 0; } /* TODO: is this really what we need here? */ static void dfu_disable(struct usb_function *f) { struct f_dfu *f_dfu = func_to_dfu(f); if (f_dfu->config == 0) return; debug("%s: reset config\n", __func__); f_dfu->config = 0; } static int dfu_bind_config(struct usb_configuration *c) { struct f_dfu *f_dfu; int status; f_dfu = calloc(sizeof(*f_dfu), 1); if (!f_dfu) return -ENOMEM; f_dfu->usb_function.name = "dfu"; f_dfu->usb_function.hs_descriptors = dfu_runtime_descs; f_dfu->usb_function.bind = dfu_bind; f_dfu->usb_function.unbind = dfu_unbind; f_dfu->usb_function.set_alt = dfu_set_alt; f_dfu->usb_function.disable = dfu_disable; f_dfu->usb_function.strings = dfu_generic_strings, f_dfu->usb_function.setup = dfu_handle, status = usb_add_function(c, &f_dfu->usb_function); if (status) free(f_dfu); return status; } int dfu_add(struct usb_configuration *c) { int id; id = usb_string_id(c->cdev); if (id < 0) return id; strings_dfu_generic[0].id = id; dfu_intf_runtime.iInterface = id; debug("%s: cdev: 0x%p gadget:0x%p gadget->ep0: 0x%p\n", __func__, c->cdev, c->cdev->gadget, c->cdev->gadget->ep0); return dfu_bind_config(c); }
gpl-2.0
gcode-mirror/audacity
lib-src/libflac/src/metaflac/operations_shorthand_cuesheet.c
18
7374
/* metaflac - Command-line FLAC metadata editor * Copyright (C) 2001-2009 Josh Coalson * Copyright (C) 2011-2013 Xiph.Org Foundation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #if HAVE_CONFIG_H # include <config.h> #endif #include <errno.h> #include <string.h> #include "options.h" #include "utils.h" #include "FLAC/assert.h" #include "share/grabbag.h" #include "share/compat.h" #include "operations_shorthand.h" static FLAC__bool import_cs_from(const char *filename, FLAC__StreamMetadata **cuesheet, const char *cs_filename, FLAC__bool *needs_write, FLAC__uint64 lead_out_offset, unsigned sample_rate, FLAC__bool is_cdda, Argument_AddSeekpoint *add_seekpoint_link); static FLAC__bool export_cs_to(const char *filename, const FLAC__StreamMetadata *cuesheet, const char *cs_filename); FLAC__bool do_shorthand_operation__cuesheet(const char *filename, FLAC__Metadata_Chain *chain, const Operation *operation, FLAC__bool *needs_write) { FLAC__bool ok = true; FLAC__StreamMetadata *cuesheet = 0; FLAC__Metadata_Iterator *iterator = FLAC__metadata_iterator_new(); FLAC__uint64 lead_out_offset = 0; FLAC__bool is_cdda = false; unsigned sample_rate = 0; if(0 == iterator) die("out of memory allocating iterator"); FLAC__metadata_iterator_init(iterator, chain); do { FLAC__StreamMetadata *block = FLAC__metadata_iterator_get_block(iterator); 
if(block->type == FLAC__METADATA_TYPE_STREAMINFO) { lead_out_offset = block->data.stream_info.total_samples; if(lead_out_offset == 0) { flac_fprintf(stderr, "%s: ERROR: FLAC file must have total_samples set in STREAMINFO in order to import/export cuesheet\n", filename); FLAC__metadata_iterator_delete(iterator); return false; } sample_rate = block->data.stream_info.sample_rate; is_cdda = (block->data.stream_info.channels == 1 || block->data.stream_info.channels == 2) && (block->data.stream_info.bits_per_sample == 16) && (sample_rate == 44100); } else if(block->type == FLAC__METADATA_TYPE_CUESHEET) cuesheet = block; } while(FLAC__metadata_iterator_next(iterator)); if(lead_out_offset == 0) { flac_fprintf(stderr, "%s: ERROR: FLAC stream has no STREAMINFO block\n", filename); FLAC__metadata_iterator_delete(iterator); return false; } switch(operation->type) { case OP__IMPORT_CUESHEET_FROM: if(0 != cuesheet) { flac_fprintf(stderr, "%s: ERROR: FLAC file already has CUESHEET block\n", filename); ok = false; } else { ok = import_cs_from(filename, &cuesheet, operation->argument.import_cuesheet_from.filename, needs_write, lead_out_offset, sample_rate, is_cdda, operation->argument.import_cuesheet_from.add_seekpoint_link); if(ok) { /* append CUESHEET block */ while(FLAC__metadata_iterator_next(iterator)) ; if(!FLAC__metadata_iterator_insert_block_after(iterator, cuesheet)) { print_error_with_chain_status(chain, "%s: ERROR: adding new CUESHEET block to metadata", filename); FLAC__metadata_object_delete(cuesheet); ok = false; } } } break; case OP__EXPORT_CUESHEET_TO: if(0 == cuesheet) { flac_fprintf(stderr, "%s: ERROR: FLAC file has no CUESHEET block\n", filename); ok = false; } else ok = export_cs_to(filename, cuesheet, operation->argument.filename.value); break; default: ok = false; FLAC__ASSERT(0); break; }; FLAC__metadata_iterator_delete(iterator); return ok; } /* * local routines */ FLAC__bool import_cs_from(const char *filename, FLAC__StreamMetadata **cuesheet, const char 
*cs_filename, FLAC__bool *needs_write, FLAC__uint64 lead_out_offset, unsigned sample_rate, FLAC__bool is_cdda, Argument_AddSeekpoint *add_seekpoint_link) { FILE *f; const char *error_message; char **seekpoint_specification = add_seekpoint_link? &(add_seekpoint_link->specification) : 0; unsigned last_line_read; if(0 == cs_filename || strlen(cs_filename) == 0) { flac_fprintf(stderr, "%s: ERROR: empty import file name\n", filename); return false; } if(0 == strcmp(cs_filename, "-")) f = stdin; else f = flac_fopen(cs_filename, "r"); if(0 == f) { flac_fprintf(stderr, "%s: ERROR: can't open import file %s: %s\n", filename, cs_filename, strerror(errno)); return false; } *cuesheet = grabbag__cuesheet_parse(f, &error_message, &last_line_read, sample_rate, is_cdda, lead_out_offset); if(f != stdin) fclose(f); if(0 == *cuesheet) { flac_fprintf(stderr, "%s: ERROR: while parsing cuesheet \"%s\" on line %u: %s\n", filename, cs_filename, last_line_read, error_message); return false; } if(!FLAC__format_cuesheet_is_legal(&(*cuesheet)->data.cue_sheet, /*check_cd_da_subset=*/false, &error_message)) { flac_fprintf(stderr, "%s: ERROR parsing cuesheet \"%s\": %s\n", filename, cs_filename, error_message); return false; } /* if we're expecting CDDA, warn about non-compliance */ if(is_cdda && !FLAC__format_cuesheet_is_legal(&(*cuesheet)->data.cue_sheet, /*check_cd_da_subset=*/true, &error_message)) { flac_fprintf(stderr, "%s: WARNING cuesheet \"%s\" is not audio CD compliant: %s\n", filename, cs_filename, error_message); (*cuesheet)->data.cue_sheet.is_cd = false; } /* add seekpoints for each index point if required */ if(0 != seekpoint_specification) { char spec[128]; unsigned track, indx; const FLAC__StreamMetadata_CueSheet *cs = &(*cuesheet)->data.cue_sheet; if(0 == *seekpoint_specification) *seekpoint_specification = local_strdup(""); for(track = 0; track < cs->num_tracks; track++) { const FLAC__StreamMetadata_CueSheet_Track *tr = cs->tracks+track; for(indx = 0; indx < tr->num_indices; 
indx++) { flac_snprintf(spec, sizeof (spec), "%" PRIu64 ";", (tr->offset + tr->indices[indx].offset)); local_strcat(seekpoint_specification, spec); } } } *needs_write = true; return true; } FLAC__bool export_cs_to(const char *filename, const FLAC__StreamMetadata *cuesheet, const char *cs_filename) { FILE *f; char *ref = 0; size_t reflen; if(0 == cs_filename || strlen(cs_filename) == 0) { flac_fprintf(stderr, "%s: ERROR: empty export file name\n", filename); return false; } if(0 == strcmp(cs_filename, "-")) f = stdout; else f = flac_fopen(cs_filename, "w"); if(0 == f) { flac_fprintf(stderr, "%s: ERROR: can't open export file %s: %s\n", filename, cs_filename, strerror(errno)); return false; } reflen = strlen(filename) + 7 + 1; if(0 == (ref = malloc(reflen))) { flac_fprintf(stderr, "%s: ERROR: allocating memory\n", filename); if(f != stdout) fclose(f); return false; } flac_snprintf(ref, reflen, "\"%s\" FLAC", filename); grabbag__cuesheet_emit(f, cuesheet, ref); free(ref); if(f != stdout) fclose(f); return true; }
gpl-2.0
phyber/irssi
src/fe-common/irc/dcc/fe-dcc-chat-messages.c
18
4775
/*
 fe-dcc-chat-messages.c : irssi

    Copyright (C) 2002 Timo Sirainen

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
*/

#include "module.h"
#include "signals.h"
#include "levels.h"

#include "irc-servers.h"
#include "irc-queries.h"

#include "dcc-chat.h"

#include "module-formats.h"
#include "printtext.h"

/* All handlers below print into the "=nick" window tag of the DCC chat.
   When a query window for that tag exists, a query-specific format is
   used; otherwise the generic one. */

/* our own message sent to a DCC chat */
static void sig_message_dcc_own(CHAT_DCC_REC *dcc, const char *msg)
{
	TEXT_DEST_REC dest;
	QUERY_REC *query_rec;
	char *chat_tag;
	int format;

	chat_tag = g_strconcat("=", dcc->id, NULL);
	query_rec = query_find(NULL, chat_tag);

	format_create_dest_tag(&dest, dcc->server, dcc->servertag, chat_tag,
			       MSGLEVEL_DCCMSGS | MSGLEVEL_NOHILIGHT |
			       MSGLEVEL_NO_ACT, NULL);

	format = query_rec != NULL ? IRCTXT_OWN_DCC_QUERY : IRCTXT_OWN_DCC;
	printformat_dest(&dest, format, dcc->mynick, dcc->id, msg);

	g_free(chat_tag);
}

/* our own CTCP ACTION (/me) sent to a DCC chat */
static void sig_message_dcc_own_action(CHAT_DCC_REC *dcc, const char *msg)
{
	TEXT_DEST_REC dest;
	QUERY_REC *query_rec;
	char *chat_tag;
	int format;

	chat_tag = g_strconcat("=", dcc->id, NULL);
	query_rec = query_find(NULL, chat_tag);

	format_create_dest_tag(&dest, dcc->server, dcc->servertag, chat_tag,
			       MSGLEVEL_DCCMSGS | MSGLEVEL_ACTIONS |
			       MSGLEVEL_NOHILIGHT | MSGLEVEL_NO_ACT, NULL);

	format = query_rec != NULL ?
		IRCTXT_OWN_DCC_ACTION_QUERY : IRCTXT_OWN_DCC_ACTION;
	printformat_dest(&dest, format, dcc->mynick, dcc->id, msg);

	g_free(chat_tag);
}

/* our own CTCP request sent to a DCC chat (no query lookup needed) */
static void sig_message_dcc_own_ctcp(CHAT_DCC_REC *dcc,
				     const char *cmd, const char *data)
{
	TEXT_DEST_REC dest;
	char *chat_tag;

	chat_tag = g_strconcat("=", dcc->id, NULL);

	format_create_dest_tag(&dest, dcc->server, dcc->servertag, chat_tag,
			       MSGLEVEL_DCC | MSGLEVEL_CTCPS |
			       MSGLEVEL_NOHILIGHT | MSGLEVEL_NO_ACT, NULL);
	printformat_dest(&dest, IRCTXT_OWN_DCC_CTCP, dcc->id, cmd, data);

	g_free(chat_tag);
}

/* incoming message from the DCC chat peer */
static void sig_message_dcc(CHAT_DCC_REC *dcc, const char *msg)
{
	TEXT_DEST_REC dest;
	QUERY_REC *query_rec;
	char *chat_tag;
	int format;

	chat_tag = g_strconcat("=", dcc->id, NULL);
	query_rec = query_find(NULL, chat_tag);

	format_create_dest_tag(&dest, dcc->server, dcc->servertag, chat_tag,
			       MSGLEVEL_DCCMSGS, NULL);

	format = query_rec != NULL ? IRCTXT_DCC_MSG_QUERY : IRCTXT_DCC_MSG;
	printformat_dest(&dest, format, dcc->id, msg);

	g_free(chat_tag);
}

/* incoming CTCP ACTION from the DCC chat peer */
static void sig_message_dcc_action(CHAT_DCC_REC *dcc, const char *msg)
{
	TEXT_DEST_REC dest;
	QUERY_REC *query_rec;
	char *chat_tag;
	int format;

	chat_tag = g_strconcat("=", dcc->id, NULL);
	query_rec = query_find(NULL, chat_tag);

	format_create_dest_tag(&dest, dcc->server, dcc->servertag, chat_tag,
			       MSGLEVEL_DCCMSGS | MSGLEVEL_ACTIONS, NULL);

	format = query_rec != NULL ?
		IRCTXT_ACTION_DCC_QUERY : IRCTXT_ACTION_DCC;
	printformat_dest(&dest, format, dcc->id, msg);

	g_free(chat_tag);
}

/* incoming CTCP request from the DCC chat peer */
static void sig_message_dcc_ctcp(CHAT_DCC_REC *dcc,
				 const char *cmd, const char *data)
{
	TEXT_DEST_REC dest;
	char *chat_tag;

	chat_tag = g_strconcat("=", dcc->id, NULL);

	format_create_dest_tag(&dest, dcc->server, dcc->servertag, chat_tag,
			       MSGLEVEL_DCC | MSGLEVEL_CTCPS, NULL);
	printformat_dest(&dest, IRCTXT_DCC_CTCP, dcc->id, cmd, data);

	g_free(chat_tag);
}

/* register all DCC chat message signal handlers */
void fe_dcc_chat_messages_init(void)
{
	signal_add("message dcc own",
		   (SIGNAL_FUNC) sig_message_dcc_own);
	signal_add("message dcc own_action",
		   (SIGNAL_FUNC) sig_message_dcc_own_action);
	signal_add("message dcc own_ctcp",
		   (SIGNAL_FUNC) sig_message_dcc_own_ctcp);
	signal_add("message dcc",
		   (SIGNAL_FUNC) sig_message_dcc);
	signal_add("message dcc action",
		   (SIGNAL_FUNC) sig_message_dcc_action);
	signal_add("message dcc ctcp",
		   (SIGNAL_FUNC) sig_message_dcc_ctcp);
}

/* unregister everything registered by fe_dcc_chat_messages_init() */
void fe_dcc_chat_messages_deinit(void)
{
	signal_remove("message dcc own",
		      (SIGNAL_FUNC) sig_message_dcc_own);
	signal_remove("message dcc own_action",
		      (SIGNAL_FUNC) sig_message_dcc_own_action);
	signal_remove("message dcc own_ctcp",
		      (SIGNAL_FUNC) sig_message_dcc_own_ctcp);
	signal_remove("message dcc",
		      (SIGNAL_FUNC) sig_message_dcc);
	signal_remove("message dcc action",
		      (SIGNAL_FUNC) sig_message_dcc_action);
	signal_remove("message dcc ctcp",
		      (SIGNAL_FUNC) sig_message_dcc_ctcp);
}
gpl-2.0
HarveyHunt/linux
net/core/pktgen.c
18
97442
/* * Authors: * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se> * Uppsala University and * Swedish University of Agricultural Sciences * * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * Ben Greear <greearb@candelatech.com> * Jens Låås <jens.laas@data.slu.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * * A tool for loading the network with preconfigurated packets. * The tool is implemented as a linux module. Parameters are output * device, delay (to hard_xmit), number of packets, and whether * to use multiple SKBs or just the same one. * pktgen uses the installed interface's output routine. * * Additional hacking by: * * Jens.Laas@data.slu.se * Improved by ANK. 010120. * Improved by ANK even more. 010212. * MAC address typo fixed. 010417 --ro * Integrated. 020301 --DaveM * Added multiskb option 020301 --DaveM * Scaling of results. 020417--sigurdur@linpro.no * Significant re-work of the module: * * Convert to threaded model to more efficiently be able to transmit * and receive on multiple interfaces at once. * * Converted many counters to __u64 to allow longer runs. * * Allow configuration of ranges, like min/max IP address, MACs, * and UDP-ports, for both source and destination, and can * set to use a random distribution or sequentially walk the range. * * Can now change most values after starting. * * Place 12-byte packet in UDP payload with magic number, * sequence number, and timestamp. * * Add receiver code that detects dropped pkts, re-ordered pkts, and * latencies (with micro-second) precision. * * Add IOCTL interface to easily get counters & configuration. * --Ben Greear <greearb@candelatech.com> * * Renamed multiskb to clone_skb and cleaned up sending core for two distinct * skb modes. 
A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0 * as a "fastpath" with a configurable number of clones after alloc's. * clone_skb=0 means all packets are allocated this also means ranges time * stamps etc can be used. clone_skb=100 means 1 malloc is followed by 100 * clones. * * Also moved to /proc/net/pktgen/ * --ro * * Sept 10: Fixed threading/locking. Lots of bone-headed and more clever * mistakes. Also merged in DaveM's patch in the -pre6 patch. * --Ben Greear <greearb@candelatech.com> * * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br) * * * 021124 Finished major redesign and rewrite for new functionality. * See Documentation/networking/pktgen.txt for how to use this. * * The new operation: * For each CPU one thread/process is created at start. This process checks * for running devices in the if_list and sends packets until count is 0 it * also the thread checks the thread->control which is used for inter-process * communication. controlling process "posts" operations to the threads this * way. * The if_list is RCU protected, and the if_lock remains to protect updating * of if_list, from "add_device" as it invoked from userspace (via proc write). * * By design there should only be *one* "controlling" process. In practice * multiple write accesses gives unpredictable result. Understood by "write" * to /proc gives result code thats should be read be the "writer". * For practical use this should be no problem. * * Note when adding devices to a specific CPU there good idea to also assign * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU. * --ro * * Fix refcount off by one if first packet fails, potential null deref, * memleak 030710- KJP * * First "ranges" functionality for ipv6 030726 --ro * * Included flow support. 030802 ANK. 
* * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org> * * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604 * * New xmit() return, do_div and misc clean up by Stephen Hemminger * <shemminger@osdl.org> 040923 * * Randy Dunlap fixed u64 printk compiler warning * * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org> * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213 * * Corrections from Nikolai Malykh (nmalykh@bilim.com) * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230 * * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com> * 050103 * * MPLS support by Steven Whitehouse <steve@chygwyn.com> * * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com> * * Fixed src_mac command to set source mac of packet to value specified in * command by Adit Ranadive <adit.262@gmail.com> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sys.h> #include <linux/types.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/unistd.h> #include <linux/string.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/capability.h> #include <linux/hrtimer.h> #include <linux/freezer.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/list.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/rtnetlink.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/udp.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/wait.h> #include <linux/etherdevice.h> 
#include <linux/kthread.h> #include <linux/prefetch.h> #include <net/net_namespace.h> #include <net/checksum.h> #include <net/ipv6.h> #include <net/udp.h> #include <net/ip6_checksum.h> #include <net/addrconf.h> #ifdef CONFIG_XFRM #include <net/xfrm.h> #endif #include <net/netns/generic.h> #include <asm/byteorder.h> #include <linux/rcupdate.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/timex.h> #include <linux/uaccess.h> #include <asm/dma.h> #include <asm/div64.h> /* do_div */ #define VERSION "2.75" #define IP_NAME_SZ 32 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ #define MPLS_STACK_BOTTOM htonl(0x00000100) #define func_enter() pr_debug("entering %s\n", __func__); #define PKT_FLAGS \ pf(IPV6) /* Interface in IPV6 Mode */ \ pf(IPSRC_RND) /* IP-Src Random */ \ pf(IPDST_RND) /* IP-Dst Random */ \ pf(TXSIZE_RND) /* Transmit size is random */ \ pf(UDPSRC_RND) /* UDP-Src Random */ \ pf(UDPDST_RND) /* UDP-Dst Random */ \ pf(UDPCSUM) /* Include UDP checksum */ \ pf(NO_TIMESTAMP) /* Don't timestamp packets (default TS) */ \ pf(MPLS_RND) /* Random MPLS labels */ \ pf(QUEUE_MAP_RND) /* queue map Random */ \ pf(QUEUE_MAP_CPU) /* queue map mirrors smp_processor_id() */ \ pf(FLOW_SEQ) /* Sequential flows */ \ pf(IPSEC) /* ipsec on for flows */ \ pf(MACSRC_RND) /* MAC-Src Random */ \ pf(MACDST_RND) /* MAC-Dst Random */ \ pf(VID_RND) /* Random VLAN ID */ \ pf(SVID_RND) /* Random SVLAN ID */ \ pf(NODE) /* Node memory alloc*/ \ #define pf(flag) flag##_SHIFT, enum pkt_flags { PKT_FLAGS }; #undef pf /* Device flag bits */ #define pf(flag) static const __u32 F_##flag = (1<<flag##_SHIFT); PKT_FLAGS #undef pf #define pf(flag) __stringify(flag), static char *pkt_flag_names[] = { PKT_FLAGS }; #undef pf #define NR_PKT_FLAGS ARRAY_SIZE(pkt_flag_names) /* Thread control flag bits */ #define T_STOP (1<<0) /* Stop run */ #define T_RUN (1<<1) /* Start run */ #define T_REMDEVALL (1<<2) /* Remove all devs */ #define T_REMDEV (1<<3) /* Remove one dev */ /* 
Xmit modes */ #define M_START_XMIT 0 /* Default normal TX */ #define M_NETIF_RECEIVE 1 /* Inject packets into stack */ #define M_QUEUE_XMIT 2 /* Inject packet into qdisc */ /* If lock -- protects updating of if_list */ #define if_lock(t) mutex_lock(&(t->if_lock)); #define if_unlock(t) mutex_unlock(&(t->if_lock)); /* Used to help with determining the pkts on receive */ #define PKTGEN_MAGIC 0xbe9be955 #define PG_PROC_DIR "pktgen" #define PGCTRL "pgctrl" #define MAX_CFLOWS 65536 #define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4) #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) struct flow_state { __be32 cur_daddr; int count; #ifdef CONFIG_XFRM struct xfrm_state *x; #endif __u32 flags; }; /* flow flag bits */ #define F_INIT (1<<0) /* flow has been initialized */ struct pktgen_dev { /* * Try to keep frequent/infrequent used vars. separated. */ struct proc_dir_entry *entry; /* proc file */ struct pktgen_thread *pg_thread;/* the owner */ struct list_head list; /* chaining in the thread's run-queue */ struct rcu_head rcu; /* freed by RCU */ int running; /* if false, the test will stop */ /* If min != max, then we will either do a linear iteration, or * we will do a random selection from within the range. */ __u32 flags; int xmit_mode; int min_pkt_size; int max_pkt_size; int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */ int nfrags; int removal_mark; /* non-zero => the device is marked for * removal by worker thread */ struct page *page; u64 delay; /* nano-seconds */ __u64 count; /* Default No packets to send */ __u64 sofar; /* How many pkts we've sent so far */ __u64 tx_bytes; /* How many bytes we've transmitted */ __u64 errors; /* Errors when trying to transmit, */ /* runtime counters relating to clone_skb */ __u32 clone_count; int last_ok; /* Was last skb sent? * Or a failed transmit of some sort? 
* This will keep sequence numbers in order */ ktime_t next_tx; ktime_t started_at; ktime_t stopped_at; u64 idle_acc; /* nano-seconds */ __u32 seq_num; int clone_skb; /* * Use multiple SKBs during packet gen. * If this number is greater than 1, then * that many copies of the same packet will be * sent before a new packet is allocated. * If you want to send 1024 identical packets * before creating a new packet, * set clone_skb to 1024. */ char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ char dst_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ char src_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ char src_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ struct in6_addr in6_saddr; struct in6_addr in6_daddr; struct in6_addr cur_in6_daddr; struct in6_addr cur_in6_saddr; /* For ranges */ struct in6_addr min_in6_daddr; struct in6_addr max_in6_daddr; struct in6_addr min_in6_saddr; struct in6_addr max_in6_saddr; /* If we're doing ranges, random or incremental, then this * defines the min/max for those ranges. */ __be32 saddr_min; /* inclusive, source IP address */ __be32 saddr_max; /* exclusive, source IP address */ __be32 daddr_min; /* inclusive, dest IP address */ __be32 daddr_max; /* exclusive, dest IP address */ __u16 udp_src_min; /* inclusive, source UDP port */ __u16 udp_src_max; /* exclusive, source UDP port */ __u16 udp_dst_min; /* inclusive, dest UDP port */ __u16 udp_dst_max; /* exclusive, dest UDP port */ /* DSCP + ECN */ __u8 tos; /* six MSB of (former) IPv4 TOS are for dscp codepoint */ __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 
4) */ /* MPLS */ unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */ __be32 labels[MAX_MPLS_LABELS]; /* VLAN/SVLAN (802.1Q/Q-in-Q) */ __u8 vlan_p; __u8 vlan_cfi; __u16 vlan_id; /* 0xffff means no vlan tag */ __u8 svlan_p; __u8 svlan_cfi; __u16 svlan_id; /* 0xffff means no svlan tag */ __u32 src_mac_count; /* How many MACs to iterate through */ __u32 dst_mac_count; /* How many MACs to iterate through */ unsigned char dst_mac[ETH_ALEN]; unsigned char src_mac[ETH_ALEN]; __u32 cur_dst_mac_offset; __u32 cur_src_mac_offset; __be32 cur_saddr; __be32 cur_daddr; __u16 ip_id; __u16 cur_udp_dst; __u16 cur_udp_src; __u16 cur_queue_map; __u32 cur_pkt_size; __u32 last_pkt_size; __u8 hh[14]; /* = { 0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB, We fill in SRC address later 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00 }; */ __u16 pad; /* pad out the hh struct to an even 16 bytes */ struct sk_buff *skb; /* skb we are to transmit next, used for when we * are transmitting the same one multiple times */ struct net_device *odev; /* The out-going device. * Note that the device should have it's * pg_info pointer pointing back to this * device. * Set when the user specifies the out-going * device name (not when the inject is * started as it used to do.) 
*/ char odevname[32]; struct flow_state *flows; unsigned int cflows; /* Concurrent flows (config) */ unsigned int lflow; /* Flow length (config) */ unsigned int nflows; /* accumulated flows (stats) */ unsigned int curfl; /* current sequenced flow (state)*/ u16 queue_map_min; u16 queue_map_max; __u32 skb_priority; /* skb priority field */ unsigned int burst; /* number of duplicated packets to burst */ int node; /* Memory node */ #ifdef CONFIG_XFRM __u8 ipsmode; /* IPSEC mode (config) */ __u8 ipsproto; /* IPSEC type (config) */ __u32 spi; struct xfrm_dst xdst; struct dst_ops dstops; #endif char result[512]; }; struct pktgen_hdr { __be32 pgh_magic; __be32 seq_num; __be32 tv_sec; __be32 tv_usec; }; static unsigned int pg_net_id __read_mostly; struct pktgen_net { struct net *net; struct proc_dir_entry *proc_dir; struct list_head pktgen_threads; bool pktgen_exiting; }; struct pktgen_thread { struct mutex if_lock; /* for list of devices */ struct list_head if_list; /* All device here */ struct list_head th_list; struct task_struct *tsk; char result[512]; /* Field for thread to receive "posted" events terminate, stop ifs etc. */ u32 control; int cpu; wait_queue_head_t queue; struct completion start_done; struct pktgen_net *net; }; #define REMOVE 1 #define FIND 0 static const char version[] = "Packet Generator for packet performance testing. 
" "Version: " VERSION "\n"; static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, const char *ifname, bool exact); static int pktgen_device_event(struct notifier_block *, unsigned long, void *); static void pktgen_run_all_threads(struct pktgen_net *pn); static void pktgen_reset_all_threads(struct pktgen_net *pn); static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn); static void pktgen_stop(struct pktgen_thread *t); static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); /* Module parameters, defaults. */ static int pg_count_d __read_mostly = 1000; static int pg_delay_d __read_mostly; static int pg_clone_skb_d __read_mostly; static int debug __read_mostly; static DEFINE_MUTEX(pktgen_thread_lock); static struct notifier_block pktgen_notifier_block = { .notifier_call = pktgen_device_event, }; /* * /proc handling functions * */ static int pgctrl_show(struct seq_file *seq, void *v) { seq_puts(seq, version); return 0; } static ssize_t pgctrl_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char data[128]; struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (count == 0) return -EINVAL; if (count > sizeof(data)) count = sizeof(data); if (copy_from_user(data, buf, count)) return -EFAULT; data[count - 1] = 0; /* Strip trailing '\n' and terminate string */ if (!strcmp(data, "stop")) pktgen_stop_all_threads_ifs(pn); else if (!strcmp(data, "start")) pktgen_run_all_threads(pn); else if (!strcmp(data, "reset")) pktgen_reset_all_threads(pn); else return -EINVAL; return count; } static int pgctrl_open(struct inode *inode, struct file *file) { return single_open(file, pgctrl_show, PDE_DATA(inode)); } static const struct file_operations pktgen_fops = { .open = pgctrl_open, .read = seq_read, .llseek = 
seq_lseek, .write = pgctrl_write, .release = single_release, }; static int pktgen_if_show(struct seq_file *seq, void *v) { const struct pktgen_dev *pkt_dev = seq->private; ktime_t stopped; unsigned int i; u64 idle; seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size, pkt_dev->max_pkt_size); seq_printf(seq, " frags: %d delay: %llu clone_skb: %d ifname: %s\n", pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, pkt_dev->clone_skb, pkt_dev->odevname); seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); seq_printf(seq, " queue_map_min: %u queue_map_max: %u\n", pkt_dev->queue_map_min, pkt_dev->queue_map_max); if (pkt_dev->skb_priority) seq_printf(seq, " skb_priority: %u\n", pkt_dev->skb_priority); if (pkt_dev->flags & F_IPV6) { seq_printf(seq, " saddr: %pI6c min_saddr: %pI6c max_saddr: %pI6c\n" " daddr: %pI6c min_daddr: %pI6c max_daddr: %pI6c\n", &pkt_dev->in6_saddr, &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr, &pkt_dev->in6_daddr, &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr); } else { seq_printf(seq, " dst_min: %s dst_max: %s\n", pkt_dev->dst_min, pkt_dev->dst_max); seq_printf(seq, " src_min: %s src_max: %s\n", pkt_dev->src_min, pkt_dev->src_max); } seq_puts(seq, " src_mac: "); seq_printf(seq, "%pM ", is_zero_ether_addr(pkt_dev->src_mac) ? pkt_dev->odev->dev_addr : pkt_dev->src_mac); seq_puts(seq, "dst_mac: "); seq_printf(seq, "%pM\n", pkt_dev->dst_mac); seq_printf(seq, " udp_src_min: %d udp_src_max: %d" " udp_dst_min: %d udp_dst_max: %d\n", pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); seq_printf(seq, " src_mac_count: %d dst_mac_count: %d\n", pkt_dev->src_mac_count, pkt_dev->dst_mac_count); if (pkt_dev->nr_labels) { seq_puts(seq, " mpls: "); for (i = 0; i < pkt_dev->nr_labels; i++) seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), i == pkt_dev->nr_labels-1 ? 
"\n" : ", "); } if (pkt_dev->vlan_id != 0xffff) seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n", pkt_dev->vlan_id, pkt_dev->vlan_p, pkt_dev->vlan_cfi); if (pkt_dev->svlan_id != 0xffff) seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n", pkt_dev->svlan_id, pkt_dev->svlan_p, pkt_dev->svlan_cfi); if (pkt_dev->tos) seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos); if (pkt_dev->traffic_class) seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); if (pkt_dev->burst > 1) seq_printf(seq, " burst: %d\n", pkt_dev->burst); if (pkt_dev->node >= 0) seq_printf(seq, " node: %d\n", pkt_dev->node); if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) seq_puts(seq, " xmit_mode: netif_receive\n"); else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) seq_puts(seq, " xmit_mode: xmit_queue\n"); seq_puts(seq, " Flags: "); for (i = 0; i < NR_PKT_FLAGS; i++) { if (i == F_FLOW_SEQ) if (!pkt_dev->cflows) continue; if (pkt_dev->flags & (1 << i)) seq_printf(seq, "%s ", pkt_flag_names[i]); else if (i == F_FLOW_SEQ) seq_puts(seq, "FLOW_RND "); #ifdef CONFIG_XFRM if (i == F_IPSEC && pkt_dev->spi) seq_printf(seq, "spi:%u", pkt_dev->spi); #endif } seq_puts(seq, "\n"); /* not really stopped, more like last-running-at */ stopped = pkt_dev->running ? 
ktime_get() : pkt_dev->stopped_at; idle = pkt_dev->idle_acc; do_div(idle, NSEC_PER_USEC); seq_printf(seq, "Current:\n pkts-sofar: %llu errors: %llu\n", (unsigned long long)pkt_dev->sofar, (unsigned long long)pkt_dev->errors); seq_printf(seq, " started: %lluus stopped: %lluus idle: %lluus\n", (unsigned long long) ktime_to_us(pkt_dev->started_at), (unsigned long long) ktime_to_us(stopped), (unsigned long long) idle); seq_printf(seq, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, pkt_dev->cur_src_mac_offset); if (pkt_dev->flags & F_IPV6) { seq_printf(seq, " cur_saddr: %pI6c cur_daddr: %pI6c\n", &pkt_dev->cur_in6_saddr, &pkt_dev->cur_in6_daddr); } else seq_printf(seq, " cur_saddr: %pI4 cur_daddr: %pI4\n", &pkt_dev->cur_saddr, &pkt_dev->cur_daddr); seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); seq_printf(seq, " cur_queue_map: %u\n", pkt_dev->cur_queue_map); seq_printf(seq, " flows: %u\n", pkt_dev->nflows); if (pkt_dev->result[0]) seq_printf(seq, "Result: %s\n", pkt_dev->result); else seq_puts(seq, "Result: Idle\n"); return 0; } static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32 *num) { int i = 0; *num = 0; for (; i < maxlen; i++) { int value; char c; *num <<= 4; if (get_user(c, &user_buffer[i])) return -EFAULT; value = hex_to_bin(c); if (value >= 0) *num |= value; else break; } return i; } static int count_trail_chars(const char __user * user_buffer, unsigned int maxlen) { int i; for (i = 0; i < maxlen; i++) { char c; if (get_user(c, &user_buffer[i])) return -EFAULT; switch (c) { case '\"': case '\n': case '\r': case '\t': case ' ': case '=': break; default: goto done; } } done: return i; } static long num_arg(const char __user *user_buffer, unsigned long maxlen, unsigned long *num) { int i; *num = 0; for (i = 0; i < maxlen; i++) { char c; if (get_user(c, &user_buffer[i])) return -EFAULT; if ((c >= '0') && (c <= '9')) { *num 
*= 10; *num += c - '0'; } else break; } return i; } static int strn_len(const char __user * user_buffer, unsigned int maxlen) { int i; for (i = 0; i < maxlen; i++) { char c; if (get_user(c, &user_buffer[i])) return -EFAULT; switch (c) { case '\"': case '\n': case '\r': case '\t': case ' ': goto done_str; default: break; } } done_str: return i; } static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) { unsigned int n = 0; char c; ssize_t i = 0; int len; pkt_dev->nr_labels = 0; do { __u32 tmp; len = hex32_arg(&buffer[i], 8, &tmp); if (len <= 0) return len; pkt_dev->labels[n] = htonl(tmp); if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) pkt_dev->flags |= F_MPLS_RND; i += len; if (get_user(c, &buffer[i])) return -EFAULT; i++; n++; if (n >= MAX_MPLS_LABELS) return -E2BIG; } while (c == ','); pkt_dev->nr_labels = n; return i; } static __u32 pktgen_read_flag(const char *f, bool *disable) { __u32 i; if (f[0] == '!') { *disable = true; f++; } for (i = 0; i < NR_PKT_FLAGS; i++) { if (!IS_ENABLED(CONFIG_XFRM) && i == IPSEC_SHIFT) continue; /* allow only disabling ipv6 flag */ if (!*disable && i == IPV6_SHIFT) continue; if (strcmp(f, pkt_flag_names[i]) == 0) return 1 << i; } if (strcmp(f, "FLOW_RND") == 0) { *disable = !*disable; return F_FLOW_SEQ; } return 0; } static ssize_t pktgen_if_write(struct file *file, const char __user * user_buffer, size_t count, loff_t * offset) { struct seq_file *seq = file->private_data; struct pktgen_dev *pkt_dev = seq->private; int i, max, len; char name[16], valstr[32]; unsigned long value = 0; char *pg_result = NULL; int tmp = 0; char buf[128]; pg_result = &(pkt_dev->result[0]); if (count < 1) { pr_warn("wrong command format\n"); return -EINVAL; } max = count; tmp = count_trail_chars(user_buffer, max); if (tmp < 0) { pr_warn("illegal format\n"); return tmp; } i = tmp; /* Read variable name */ len = strn_len(&user_buffer[i], sizeof(name) - 1); if (len < 0) return len; memset(name, 0, sizeof(name)); if 
(copy_from_user(name, &user_buffer[i], len)) return -EFAULT; i += len; max = count - i; len = count_trail_chars(&user_buffer[i], max); if (len < 0) return len; i += len; if (debug) { size_t copy = min_t(size_t, count, 1023); char tb[copy + 1]; if (copy_from_user(tb, user_buffer, copy)) return -EFAULT; tb[copy] = 0; pr_debug("%s,%lu buffer -:%s:-\n", name, (unsigned long)count, tb); } if (!strcmp(name, "min_pkt_size")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value < 14 + 20 + 8) value = 14 + 20 + 8; if (value != pkt_dev->min_pkt_size) { pkt_dev->min_pkt_size = value; pkt_dev->cur_pkt_size = value; } sprintf(pg_result, "OK: min_pkt_size=%u", pkt_dev->min_pkt_size); return count; } if (!strcmp(name, "max_pkt_size")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value < 14 + 20 + 8) value = 14 + 20 + 8; if (value != pkt_dev->max_pkt_size) { pkt_dev->max_pkt_size = value; pkt_dev->cur_pkt_size = value; } sprintf(pg_result, "OK: max_pkt_size=%u", pkt_dev->max_pkt_size); return count; } /* Shortcut for min = max */ if (!strcmp(name, "pkt_size")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value < 14 + 20 + 8) value = 14 + 20 + 8; if (value != pkt_dev->min_pkt_size) { pkt_dev->min_pkt_size = value; pkt_dev->max_pkt_size = value; pkt_dev->cur_pkt_size = value; } sprintf(pg_result, "OK: pkt_size=%u", pkt_dev->min_pkt_size); return count; } if (!strcmp(name, "debug")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; debug = value; sprintf(pg_result, "OK: debug=%u", debug); return count; } if (!strcmp(name, "frags")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; pkt_dev->nfrags = value; sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); return count; } if (!strcmp(name, "delay")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value == 0x7FFFFFFF) 
pkt_dev->delay = ULLONG_MAX; else pkt_dev->delay = (u64)value; sprintf(pg_result, "OK: delay=%llu", (unsigned long long) pkt_dev->delay); return count; } if (!strcmp(name, "rate")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (!value) return len; pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value; if (debug) pr_info("Delay set at: %llu ns\n", pkt_dev->delay); sprintf(pg_result, "OK: rate=%lu", value); return count; } if (!strcmp(name, "ratep")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (!value) return len; pkt_dev->delay = NSEC_PER_SEC/value; if (debug) pr_info("Delay set at: %llu ns\n", pkt_dev->delay); sprintf(pg_result, "OK: rate=%lu", value); return count; } if (!strcmp(name, "udp_src_min")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value != pkt_dev->udp_src_min) { pkt_dev->udp_src_min = value; pkt_dev->cur_udp_src = value; } sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min); return count; } if (!strcmp(name, "udp_dst_min")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value != pkt_dev->udp_dst_min) { pkt_dev->udp_dst_min = value; pkt_dev->cur_udp_dst = value; } sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min); return count; } if (!strcmp(name, "udp_src_max")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value != pkt_dev->udp_src_max) { pkt_dev->udp_src_max = value; pkt_dev->cur_udp_src = value; } sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max); return count; } if (!strcmp(name, "udp_dst_max")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value != pkt_dev->udp_dst_max) { pkt_dev->udp_dst_max = value; pkt_dev->cur_udp_dst = value; } sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max); return count; } if (!strcmp(name, "clone_skb")) { len = 
num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; if ((value > 0) && ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) || !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))) return -ENOTSUPP; i += len; pkt_dev->clone_skb = value; sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb); return count; } if (!strcmp(name, "count")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; pkt_dev->count = value; sprintf(pg_result, "OK: count=%llu", (unsigned long long)pkt_dev->count); return count; } if (!strcmp(name, "src_mac_count")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (pkt_dev->src_mac_count != value) { pkt_dev->src_mac_count = value; pkt_dev->cur_src_mac_offset = 0; } sprintf(pg_result, "OK: src_mac_count=%d", pkt_dev->src_mac_count); return count; } if (!strcmp(name, "dst_mac_count")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (pkt_dev->dst_mac_count != value) { pkt_dev->dst_mac_count = value; pkt_dev->cur_dst_mac_offset = 0; } sprintf(pg_result, "OK: dst_mac_count=%d", pkt_dev->dst_mac_count); return count; } if (!strcmp(name, "burst")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if ((value > 1) && ((pkt_dev->xmit_mode == M_QUEUE_XMIT) || ((pkt_dev->xmit_mode == M_START_XMIT) && (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))))) return -ENOTSUPP; pkt_dev->burst = value < 1 ? 
1 : value; sprintf(pg_result, "OK: burst=%d", pkt_dev->burst); return count; } if (!strcmp(name, "node")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (node_possible(value)) { pkt_dev->node = value; sprintf(pg_result, "OK: node=%d", pkt_dev->node); if (pkt_dev->page) { put_page(pkt_dev->page); pkt_dev->page = NULL; } } else sprintf(pg_result, "ERROR: node not possible"); return count; } if (!strcmp(name, "xmit_mode")) { char f[32]; memset(f, 0, 32); len = strn_len(&user_buffer[i], sizeof(f) - 1); if (len < 0) return len; if (copy_from_user(f, &user_buffer[i], len)) return -EFAULT; i += len; if (strcmp(f, "start_xmit") == 0) { pkt_dev->xmit_mode = M_START_XMIT; } else if (strcmp(f, "netif_receive") == 0) { /* clone_skb set earlier, not supported in this mode */ if (pkt_dev->clone_skb > 0) return -ENOTSUPP; pkt_dev->xmit_mode = M_NETIF_RECEIVE; /* make sure new packet is allocated every time * pktgen_xmit() is called */ pkt_dev->last_ok = 1; /* override clone_skb if user passed default value * at module loading time */ pkt_dev->clone_skb = 0; } else if (strcmp(f, "queue_xmit") == 0) { pkt_dev->xmit_mode = M_QUEUE_XMIT; pkt_dev->last_ok = 1; } else { sprintf(pg_result, "xmit_mode -:%s:- unknown\nAvailable modes: %s", f, "start_xmit, netif_receive\n"); return count; } sprintf(pg_result, "OK: xmit_mode=%s", f); return count; } if (!strcmp(name, "flag")) { __u32 flag; char f[32]; bool disable = false; memset(f, 0, 32); len = strn_len(&user_buffer[i], sizeof(f) - 1); if (len < 0) return len; if (copy_from_user(f, &user_buffer[i], len)) return -EFAULT; i += len; flag = pktgen_read_flag(f, &disable); if (flag) { if (disable) pkt_dev->flags &= ~flag; else pkt_dev->flags |= flag; } else { sprintf(pg_result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! 
to un-set flag):\n%s", f, "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, " "MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, " "QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, " "NO_TIMESTAMP, " #ifdef CONFIG_XFRM "IPSEC, " #endif "NODE_ALLOC\n"); return count; } sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); return count; } if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); if (len < 0) return len; if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; if (strcmp(buf, pkt_dev->dst_min) != 0) { memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min)); strncpy(pkt_dev->dst_min, buf, len); pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); pkt_dev->cur_daddr = pkt_dev->daddr_min; } if (debug) pr_debug("dst_min set to: %s\n", pkt_dev->dst_min); i += len; sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); return count; } if (!strcmp(name, "dst_max")) { len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); if (len < 0) return len; if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; if (strcmp(buf, pkt_dev->dst_max) != 0) { memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max)); strncpy(pkt_dev->dst_max, buf, len); pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); pkt_dev->cur_daddr = pkt_dev->daddr_max; } if (debug) pr_debug("dst_max set to: %s\n", pkt_dev->dst_max); i += len; sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); return count; } if (!strcmp(name, "dst6")) { len = strn_len(&user_buffer[i], sizeof(buf) - 1); if (len < 0) return len; pkt_dev->flags |= F_IPV6; if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr); pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr; if (debug) pr_debug("dst6 set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6=%s", buf); return count; } 
if (!strcmp(name, "dst6_min")) { len = strn_len(&user_buffer[i], sizeof(buf) - 1); if (len < 0) return len; pkt_dev->flags |= F_IPV6; if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr); pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr; if (debug) pr_debug("dst6_min set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6_min=%s", buf); return count; } if (!strcmp(name, "dst6_max")) { len = strn_len(&user_buffer[i], sizeof(buf) - 1); if (len < 0) return len; pkt_dev->flags |= F_IPV6; if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr); if (debug) pr_debug("dst6_max set to: %s\n", buf); i += len; sprintf(pg_result, "OK: dst6_max=%s", buf); return count; } if (!strcmp(name, "src6")) { len = strn_len(&user_buffer[i], sizeof(buf) - 1); if (len < 0) return len; pkt_dev->flags |= F_IPV6; if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr); pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr; if (debug) pr_debug("src6 set to: %s\n", buf); i += len; sprintf(pg_result, "OK: src6=%s", buf); return count; } if (!strcmp(name, "src_min")) { len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); if (len < 0) return len; if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; if (strcmp(buf, pkt_dev->src_min) != 0) { memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min)); strncpy(pkt_dev->src_min, buf, len); pkt_dev->saddr_min = in_aton(pkt_dev->src_min); pkt_dev->cur_saddr = pkt_dev->saddr_min; } if (debug) pr_debug("src_min set to: %s\n", pkt_dev->src_min); i += len; sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); return 
count; } if (!strcmp(name, "src_max")) { len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); if (len < 0) return len; if (copy_from_user(buf, &user_buffer[i], len)) return -EFAULT; buf[len] = 0; if (strcmp(buf, pkt_dev->src_max) != 0) { memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max)); strncpy(pkt_dev->src_max, buf, len); pkt_dev->saddr_max = in_aton(pkt_dev->src_max); pkt_dev->cur_saddr = pkt_dev->saddr_max; } if (debug) pr_debug("src_max set to: %s\n", pkt_dev->src_max); i += len; sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); return count; } if (!strcmp(name, "dst_mac")) { len = strn_len(&user_buffer[i], sizeof(valstr) - 1); if (len < 0) return len; memset(valstr, 0, sizeof(valstr)); if (copy_from_user(valstr, &user_buffer[i], len)) return -EFAULT; if (!mac_pton(valstr, pkt_dev->dst_mac)) return -EINVAL; /* Set up Dest MAC */ ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac); sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac); return count; } if (!strcmp(name, "src_mac")) { len = strn_len(&user_buffer[i], sizeof(valstr) - 1); if (len < 0) return len; memset(valstr, 0, sizeof(valstr)); if (copy_from_user(valstr, &user_buffer[i], len)) return -EFAULT; if (!mac_pton(valstr, pkt_dev->src_mac)) return -EINVAL; /* Set up Src MAC */ ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac); sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac); return count; } if (!strcmp(name, "clear_counters")) { pktgen_clear_counters(pkt_dev); sprintf(pg_result, "OK: Clearing counters.\n"); return count; } if (!strcmp(name, "flows")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; if (value > MAX_CFLOWS) value = MAX_CFLOWS; pkt_dev->cflows = value; sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows); return count; } #ifdef CONFIG_XFRM if (!strcmp(name, "spi")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; pkt_dev->spi = value; sprintf(pg_result, "OK: spi=%u", pkt_dev->spi); return count; } 
#endif if (!strcmp(name, "flowlen")) { len = num_arg(&user_buffer[i], 10, &value); if (len < 0) return len; i += len; pkt_dev->lflow = value; sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); return count; } if (!strcmp(name, "queue_map_min")) { len = num_arg(&user_buffer[i], 5, &value); if (len < 0) return len; i += len; pkt_dev->queue_map_min = value; sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); return count; } if (!strcmp(name, "queue_map_max")) { len = num_arg(&user_buffer[i], 5, &value); if (len < 0) return len; i += len; pkt_dev->queue_map_max = value; sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); return count; } if (!strcmp(name, "mpls")) { unsigned int n, cnt; len = get_labels(&user_buffer[i], pkt_dev); if (len < 0) return len; i += len; cnt = sprintf(pg_result, "OK: mpls="); for (n = 0; n < pkt_dev->nr_labels; n++) cnt += sprintf(pg_result + cnt, "%08x%s", ntohl(pkt_dev->labels[n]), n == pkt_dev->nr_labels-1 ? "" : ","); if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) { pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ pkt_dev->svlan_id = 0xffff; if (debug) pr_debug("VLAN/SVLAN auto turned off\n"); } return count; } if (!strcmp(name, "vlan_id")) { len = num_arg(&user_buffer[i], 4, &value); if (len < 0) return len; i += len; if (value <= 4095) { pkt_dev->vlan_id = value; /* turn on VLAN */ if (debug) pr_debug("VLAN turned on\n"); if (debug && pkt_dev->nr_labels) pr_debug("MPLS auto turned off\n"); pkt_dev->nr_labels = 0; /* turn off MPLS */ sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id); } else { pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ pkt_dev->svlan_id = 0xffff; if (debug) pr_debug("VLAN/SVLAN turned off\n"); } return count; } if (!strcmp(name, "vlan_p")) { len = num_arg(&user_buffer[i], 1, &value); if (len < 0) return len; i += len; if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { pkt_dev->vlan_p = value; sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p); } else { 
sprintf(pg_result, "ERROR: vlan_p must be 0-7"); } return count; } if (!strcmp(name, "vlan_cfi")) { len = num_arg(&user_buffer[i], 1, &value); if (len < 0) return len; i += len; if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { pkt_dev->vlan_cfi = value; sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi); } else { sprintf(pg_result, "ERROR: vlan_cfi must be 0-1"); } return count; } if (!strcmp(name, "svlan_id")) { len = num_arg(&user_buffer[i], 4, &value); if (len < 0) return len; i += len; if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { pkt_dev->svlan_id = value; /* turn on SVLAN */ if (debug) pr_debug("SVLAN turned on\n"); if (debug && pkt_dev->nr_labels) pr_debug("MPLS auto turned off\n"); pkt_dev->nr_labels = 0; /* turn off MPLS */ sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id); } else { pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */ pkt_dev->svlan_id = 0xffff; if (debug) pr_debug("VLAN/SVLAN turned off\n"); } return count; } if (!strcmp(name, "svlan_p")) { len = num_arg(&user_buffer[i], 1, &value); if (len < 0) return len; i += len; if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { pkt_dev->svlan_p = value; sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p); } else { sprintf(pg_result, "ERROR: svlan_p must be 0-7"); } return count; } if (!strcmp(name, "svlan_cfi")) { len = num_arg(&user_buffer[i], 1, &value); if (len < 0) return len; i += len; if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { pkt_dev->svlan_cfi = value; sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi); } else { sprintf(pg_result, "ERROR: svlan_cfi must be 0-1"); } return count; } if (!strcmp(name, "tos")) { __u32 tmp_value = 0; len = hex32_arg(&user_buffer[i], 2, &tmp_value); if (len < 0) return len; i += len; if (len == 2) { pkt_dev->tos = tmp_value; sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos); } else { sprintf(pg_result, "ERROR: tos must be 00-ff"); } return count; } if (!strcmp(name, "traffic_class")) { __u32 tmp_value = 0; len = 
hex32_arg(&user_buffer[i], 2, &tmp_value); if (len < 0) return len; i += len; if (len == 2) { pkt_dev->traffic_class = tmp_value; sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class); } else { sprintf(pg_result, "ERROR: traffic_class must be 00-ff"); } return count; } if (!strcmp(name, "skb_priority")) { len = num_arg(&user_buffer[i], 9, &value); if (len < 0) return len; i += len; pkt_dev->skb_priority = value; sprintf(pg_result, "OK: skb_priority=%i", pkt_dev->skb_priority); return count; } sprintf(pkt_dev->result, "No such parameter \"%s\"", name); return -EINVAL; } static int pktgen_if_open(struct inode *inode, struct file *file) { return single_open(file, pktgen_if_show, PDE_DATA(inode)); } static const struct file_operations pktgen_if_fops = { .open = pktgen_if_open, .read = seq_read, .llseek = seq_lseek, .write = pktgen_if_write, .release = single_release, }; static int pktgen_thread_show(struct seq_file *seq, void *v) { struct pktgen_thread *t = seq->private; const struct pktgen_dev *pkt_dev; BUG_ON(!t); seq_puts(seq, "Running: "); rcu_read_lock(); list_for_each_entry_rcu(pkt_dev, &t->if_list, list) if (pkt_dev->running) seq_printf(seq, "%s ", pkt_dev->odevname); seq_puts(seq, "\nStopped: "); list_for_each_entry_rcu(pkt_dev, &t->if_list, list) if (!pkt_dev->running) seq_printf(seq, "%s ", pkt_dev->odevname); if (t->result[0]) seq_printf(seq, "\nResult: %s\n", t->result); else seq_puts(seq, "\nResult: NA\n"); rcu_read_unlock(); return 0; } static ssize_t pktgen_thread_write(struct file *file, const char __user * user_buffer, size_t count, loff_t * offset) { struct seq_file *seq = file->private_data; struct pktgen_thread *t = seq->private; int i, max, len, ret; char name[40]; char *pg_result; if (count < 1) { // sprintf(pg_result, "Wrong command format"); return -EINVAL; } max = count; len = count_trail_chars(user_buffer, max); if (len < 0) return len; i = len; /* Read variable name */ len = strn_len(&user_buffer[i], sizeof(name) - 1); if 
(len < 0) return len; memset(name, 0, sizeof(name)); if (copy_from_user(name, &user_buffer[i], len)) return -EFAULT; i += len; max = count - i; len = count_trail_chars(&user_buffer[i], max); if (len < 0) return len; i += len; if (debug) pr_debug("t=%s, count=%lu\n", name, (unsigned long)count); if (!t) { pr_err("ERROR: No thread\n"); ret = -EINVAL; goto out; } pg_result = &(t->result[0]); if (!strcmp(name, "add_device")) { char f[32]; memset(f, 0, 32); len = strn_len(&user_buffer[i], sizeof(f) - 1); if (len < 0) { ret = len; goto out; } if (copy_from_user(f, &user_buffer[i], len)) return -EFAULT; i += len; mutex_lock(&pktgen_thread_lock); ret = pktgen_add_device(t, f); mutex_unlock(&pktgen_thread_lock); if (!ret) { ret = count; sprintf(pg_result, "OK: add_device=%s", f); } else sprintf(pg_result, "ERROR: can not add device %s", f); goto out; } if (!strcmp(name, "rem_device_all")) { mutex_lock(&pktgen_thread_lock); t->control |= T_REMDEVALL; mutex_unlock(&pktgen_thread_lock); schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ ret = count; sprintf(pg_result, "OK: rem_device_all"); goto out; } if (!strcmp(name, "max_before_softirq")) { sprintf(pg_result, "OK: Note! 
max_before_softirq is obsoleted -- Do not use"); ret = count; goto out; } ret = -EINVAL; out: return ret; } static int pktgen_thread_open(struct inode *inode, struct file *file) { return single_open(file, pktgen_thread_show, PDE_DATA(inode)); } static const struct file_operations pktgen_thread_fops = { .open = pktgen_thread_open, .read = seq_read, .llseek = seq_lseek, .write = pktgen_thread_write, .release = single_release, }; /* Think find or remove for NN */ static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn, const char *ifname, int remove) { struct pktgen_thread *t; struct pktgen_dev *pkt_dev = NULL; bool exact = (remove == FIND); list_for_each_entry(t, &pn->pktgen_threads, th_list) { pkt_dev = pktgen_find_dev(t, ifname, exact); if (pkt_dev) { if (remove) { pkt_dev->removal_mark = 1; t->control |= T_REMDEV; } break; } } return pkt_dev; } /* * mark a device for removal */ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname) { struct pktgen_dev *pkt_dev = NULL; const int max_tries = 10, msec_per_try = 125; int i = 0; mutex_lock(&pktgen_thread_lock); pr_debug("%s: marking %s for removal\n", __func__, ifname); while (1) { pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE); if (pkt_dev == NULL) break; /* success */ mutex_unlock(&pktgen_thread_lock); pr_debug("%s: waiting for %s to disappear....\n", __func__, ifname); schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); mutex_lock(&pktgen_thread_lock); if (++i >= max_tries) { pr_err("%s: timed out after waiting %d msec for device %s to be removed\n", __func__, msec_per_try * i, ifname); break; } } mutex_unlock(&pktgen_thread_lock); } static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev) { struct pktgen_thread *t; mutex_lock(&pktgen_thread_lock); list_for_each_entry(t, &pn->pktgen_threads, th_list) { struct pktgen_dev *pkt_dev; if_lock(t); list_for_each_entry(pkt_dev, &t->if_list, list) { if (pkt_dev->odev != dev) continue; 
proc_remove(pkt_dev->entry);
			pkt_dev->entry = proc_create_data(dev->name, 0600,
							  pn->proc_dir,
							  &pktgen_if_fops,
							  pkt_dev);
			if (!pkt_dev->entry)
				pr_err("can't move proc entry for '%s'\n",
				       dev->name);
			break;
		}
		if_unlock(t);
	}
	mutex_unlock(&pktgen_thread_lock);
}

/* Netdevice notifier callback: keeps pktgen state in sync with device
 * renames (recreate the per-device /proc entry) and unregistration
 * (mark the pktgen_dev for removal). Ignored while the netns is
 * tearing down (pn->pktgen_exiting).
 */
static int pktgen_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);

	if (pn->pktgen_exiting)
		return NOTIFY_DONE;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGENAME:
		pktgen_change_name(pn, dev);
		break;

	case NETDEV_UNREGISTER:
		pktgen_mark_device(pn, dev->name);
		break;
	}

	return NOTIFY_DONE;
}

/* Look up the net_device for @ifname, ignoring any "@suffix" the user
 * appended (only the part before the first '@', at most IFNAMSIZ
 * characters, is used for the lookup). Returns a referenced device
 * (callers such as pktgen_setup_dev() dev_put() it) or NULL.
 */
static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn,
						 struct pktgen_dev *pkt_dev,
						 const char *ifname)
{
	char b[IFNAMSIZ+5];
	int i;

	for (i = 0; ifname[i] != '@'; i++) {
		if (i == IFNAMSIZ)
			break;

		b[i] = ifname[i];
	}
	b[i] = 0;

	return dev_get_by_name(pn->net, b);
}

/* Associate pktgen_dev with a device.
 */
/* Bind pkt_dev to the net_device named by ifname.  Drops any previous
 * device reference first.  Only running Ethernet devices are accepted;
 * on success pkt_dev->odev holds a device reference, on failure the
 * temporary reference is released and a -errno is returned.
 */
static int pktgen_setup_dev(const struct pktgen_net *pn,
			    struct pktgen_dev *pkt_dev, const char *ifname)
{
	struct net_device *odev;
	int err;

	/* Clean old setups */
	if (pkt_dev->odev) {
		dev_put(pkt_dev->odev);
		pkt_dev->odev = NULL;
	}

	odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname);
	if (!odev) {
		pr_err("no such netdevice: \"%s\"\n", ifname);
		return -ENODEV;
	}

	if (odev->type != ARPHRD_ETHER) {
		pr_err("not an ethernet device: \"%s\"\n", ifname);
		err = -EINVAL;
	} else if (!netif_running(odev)) {
		pr_err("device is down: \"%s\"\n", ifname);
		err = -ENETDOWN;
	} else {
		pkt_dev->odev = odev;
		return 0;
	}

	/* Error paths fall through to here: release the lookup reference. */
	dev_put(odev);
	return err;
}

/* Read pkt_dev from the interface and set up internal pktgen_dev
 * structure to have the right information to create/send packets
 */
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
	int ntxq;

	if (!pkt_dev->odev) {
		pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
		sprintf(pkt_dev->result,
			"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
		return;
	}

	/* make sure that we don't pick a non-existing transmit queue */
	ntxq = pkt_dev->odev->real_num_tx_queues;

	/* Clamp the user-requested queue range to [0, ntxq-1]; the
	 * (ntxq ?: 1) guards the degenerate zero-queue case.
	 */
	if (ntxq <= pkt_dev->queue_map_min) {
		pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
			pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
			pkt_dev->odevname);
		pkt_dev->queue_map_min = (ntxq ?: 1) - 1;
	}
	if (pkt_dev->queue_map_max >= ntxq) {
		pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
			pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
			pkt_dev->odevname);
		pkt_dev->queue_map_max = (ntxq ?: 1) - 1;
	}

	/* Default to the interface's mac if not explicitly set.
	 */
	if (is_zero_ether_addr(pkt_dev->src_mac))
		ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);

	/* Set up Dest MAC */
	ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);

	if (pkt_dev->flags & F_IPV6) {
		int i, set = 0, err = 1;
		struct inet6_dev *idev;

		/* Minimum size = Ethernet + IPv6 + UDP + pktgen header. */
		if (pkt_dev->min_pkt_size == 0) {
			pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr) +
						sizeof(struct udphdr) +
						sizeof(struct pktgen_hdr) +
						pkt_dev->pkt_overhead;
		}

		/* Is any byte of the configured source address non-zero? */
		for (i = 0; i < sizeof(struct in6_addr); i++)
			if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
				set = 1;
				break;
			}

		if (!set) {

			/*
			 * Use linklevel address if unconfigured.
			 *
			 * use ipv6_get_lladdr if/when it's get exported
			 */

			rcu_read_lock();
			idev = __in6_dev_get(pkt_dev->odev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					/* First non-tentative link-local wins. */
					if ((ifp->scope & IFA_LINK) &&
					    !(ifp->flags & IFA_F_TENTATIVE)) {
						pkt_dev->cur_in6_saddr = ifp->addr;
						err = 0;
						break;
					}
				}
				read_unlock_bh(&idev->lock);
			}
			rcu_read_unlock();
			if (err)
				pr_err("ERROR: IPv6 link address not available\n");
		}
	} else {
		/* IPv4: minimum size = Ethernet + IPv4 + UDP + pktgen header. */
		if (pkt_dev->min_pkt_size == 0) {
			pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr) +
						sizeof(struct udphdr) +
						sizeof(struct pktgen_hdr) +
						pkt_dev->pkt_overhead;
		}

		pkt_dev->saddr_min = 0;
		pkt_dev->saddr_max = 0;
		if (strlen(pkt_dev->src_min) == 0) {
			/* No explicit src range: use the device's first
			 * configured IPv4 address for both min and max.
			 */
			struct in_device *in_dev;

			rcu_read_lock();
			in_dev = __in_dev_get_rcu(pkt_dev->odev);
			if (in_dev) {
				if (in_dev->ifa_list) {
					pkt_dev->saddr_min =
					    in_dev->ifa_list->ifa_address;
					pkt_dev->saddr_max = pkt_dev->saddr_min;
				}
			}
			rcu_read_unlock();
		} else {
			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
		}

		pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
		pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
	}
	/* Initialize current values.
	 */
	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
	if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
		pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;

	pkt_dev->cur_dst_mac_offset = 0;
	pkt_dev->cur_src_mac_offset = 0;
	pkt_dev->cur_saddr = pkt_dev->saddr_min;
	pkt_dev->cur_daddr = pkt_dev->daddr_min;
	pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
	pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
	pkt_dev->nflows = 0;
}

/* Wait until the absolute time spin_until: busy-poll for sub-100us
 * delays, otherwise sleep on an hrtimer (interruptible, and aborted if
 * the device stops running).  Time spent here is accounted into
 * idle_acc, and next_tx is advanced by the configured inter-packet
 * delay.
 */
static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{
	ktime_t start_time, end_time;
	s64 remaining;
	struct hrtimer_sleeper t;

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_set_expires(&t.timer, spin_until);

	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
	if (remaining <= 0)
		goto out;

	start_time = ktime_get();
	if (remaining < 100000) {
		/* for small delays (<100us), just loop until limit is reached */
		do {
			end_time = ktime_get();
		} while (ktime_compare(end_time, spin_until) < 0);
	} else {
		/* see do_nanosleep */
		hrtimer_init_sleeper(&t, current);
		do {
			set_current_state(TASK_INTERRUPTIBLE);
			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);

			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);
		} while (t.task && pkt_dev->running && !signal_pending(current));
		__set_current_state(TASK_RUNNING);
		end_time = ktime_get();
	}

	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
out:
	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
	destroy_hrtimer_on_stack(&t.timer);
}

/* Recompute per-packet header overhead from the configured MPLS label
 * count and VLAN/SVLAN tagging.
 */
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
{
	pkt_dev->pkt_overhead = 0;
	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
}

/* Has this flow already been initialised (F_INIT set)? */
static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
{
	return !!(pkt_dev->flows[flow].flags & F_INIT);
}

/* Choose the next flow index: sequential round-robin under F_FLOW_SEQ,
 * otherwise uniformly random.  A flow whose packet count passed lflow
 * is reset.  Returns the chosen index (also stored in curfl).
 */
static inline int f_pick(struct pktgen_dev *pkt_dev)
{
	int flow = pkt_dev->curfl;

	if (pkt_dev->flags & F_FLOW_SEQ) {
		if (pkt_dev->flows[flow].count >=
pkt_dev->lflow) {
			/* reset time */
			pkt_dev->flows[flow].count = 0;
			pkt_dev->flows[flow].flags = 0;
			pkt_dev->curfl += 1;
			if (pkt_dev->curfl >= pkt_dev->cflows)
				pkt_dev->curfl = 0; /*reset */
		}
	} else {
		flow = prandom_u32() % pkt_dev->cflows;
		pkt_dev->curfl = flow;

		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
			pkt_dev->flows[flow].count = 0;
			pkt_dev->flows[flow].flags = 0;
		}
	}

	return pkt_dev->curfl;
}


#ifdef CONFIG_XFRM
/* If there was already an IPSEC SA, we keep it as is, else
 * we go look for it ...
*/
#define DUMMY_MARK 0
static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
{
	struct xfrm_state *x = pkt_dev->flows[flow].x;
	struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
	if (!x) {

		if (pkt_dev->spi) {
			/* We need as quick as possible to find the right SA
			 * Searching with minimum criteria to archieve this.
			 */
			x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET);
		} else {
			/* slow path: we dont already have xfrm_state */
			x = xfrm_stateonly_find(pn->net, DUMMY_MARK,
						(xfrm_address_t *)&pkt_dev->cur_daddr,
						(xfrm_address_t *)&pkt_dev->cur_saddr,
						AF_INET,
						pkt_dev->ipsmode,
						pkt_dev->ipsproto, 0);
		}
		if (x) {
			/* Cache the SA on the flow and account its header
			 * length into the per-packet overhead.
			 */
			pkt_dev->flows[flow].x = x;
			set_pkt_overhead(pkt_dev);
			pkt_dev->pkt_overhead += x->props.header_len;
		}

	}
}
#endif
/* Pick the TX queue for the next packet: current CPU (F_QUEUE_MAP_CPU),
 * random or sequential within [queue_map_min, queue_map_max], always
 * wrapped to the device's real queue count.
 */
static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
{

	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
		pkt_dev->cur_queue_map = smp_processor_id();

	else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
		__u16 t;
		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
			t = prandom_u32() %
				(pkt_dev->queue_map_max -
				 pkt_dev->queue_map_min + 1)
				+ pkt_dev->queue_map_min;
		} else {
			t = pkt_dev->cur_queue_map + 1;
			if (t > pkt_dev->queue_map_max)
				t = pkt_dev->queue_map_min;
		}
		pkt_dev->cur_queue_map = t;
	}
	pkt_dev->cur_queue_map  = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues;
}

/* Increment/randomize headers according to flags and current values
 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst
 */
static void mod_cur_headers(struct pktgen_dev *pkt_dev)
{
	__u32 imn;
	__u32 imx;
	int flow = 0;

	if (pkt_dev->cflows)
		flow = f_pick(pkt_dev);

	/* Deal with source MAC */
	if (pkt_dev->src_mac_count > 1) {
		__u32 mc;
		__u32 tmp;

		if (pkt_dev->flags & F_MACSRC_RND)
			mc = prandom_u32() % pkt_dev->src_mac_count;
		else {
			mc = pkt_dev->cur_src_mac_offset++;
			if (pkt_dev->cur_src_mac_offset >=
			    pkt_dev->src_mac_count)
				pkt_dev->cur_src_mac_offset = 0;
		}

		/* Add the offset mc to the low 4 MAC bytes with manual
		 * byte-wise carry propagation; hh[6..11] is the source MAC
		 * inside the cached Ethernet header.
		 */
		tmp = pkt_dev->src_mac[5] + (mc & 0xFF);
		pkt_dev->hh[11] = tmp;
		tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[10] = tmp;
		tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[9] = tmp;
		tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[8] = tmp;
		tmp = (pkt_dev->src_mac[1] + (tmp >> 8));
		pkt_dev->hh[7] = tmp;
	}

	/* Deal with Destination MAC */
	if (pkt_dev->dst_mac_count > 1) {
		__u32 mc;
		__u32 tmp;

		if (pkt_dev->flags & F_MACDST_RND)
			mc = prandom_u32() % pkt_dev->dst_mac_count;

		else {
			mc = pkt_dev->cur_dst_mac_offset++;
			if (pkt_dev->cur_dst_mac_offset >=
			    pkt_dev->dst_mac_count) {
				pkt_dev->cur_dst_mac_offset = 0;
			}
		}

		/* Same carry-propagating add for the destination MAC,
		 * stored in hh[0..5].
		 */
		tmp = pkt_dev->dst_mac[5] + (mc & 0xFF);
		pkt_dev->hh[5] = tmp;
		tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[4] = tmp;
		tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[3] = tmp;
		tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8));
		pkt_dev->hh[2] = tmp;
		tmp = (pkt_dev->dst_mac[1] + (tmp >> 8));
		pkt_dev->hh[1] = tmp;
	}

	if (pkt_dev->flags & F_MPLS_RND) {
		unsigned int i;
		for (i = 0; i < pkt_dev->nr_labels; i++)
			if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
				pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
					((__force __be32)prandom_u32() &
						     htonl(0x000fffff));
	}

	if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
		pkt_dev->vlan_id = prandom_u32() & (4096 - 1);
	}

	if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
		pkt_dev->svlan_id = prandom_u32() & (4096 - 1);
	}

	/* UDP source port: random or sequential within [min, max). */
	if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
		if (pkt_dev->flags & F_UDPSRC_RND)
			pkt_dev->cur_udp_src = prandom_u32() %
				(pkt_dev->udp_src_max - pkt_dev->udp_src_min)
				+ pkt_dev->udp_src_min;

		else {
			pkt_dev->cur_udp_src++;
			if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max)
				pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
		}
	}

	/* UDP destination port: same scheme. */
	if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
		if (pkt_dev->flags & F_UDPDST_RND) {
			pkt_dev->cur_udp_dst = prandom_u32() %
				(pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)
				+ pkt_dev->udp_dst_min;
		} else {
			pkt_dev->cur_udp_dst++;
			if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max)
				pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
		}
	}

	if (!(pkt_dev->flags & F_IPV6)) {

		imn = ntohl(pkt_dev->saddr_min);
		imx = ntohl(pkt_dev->saddr_max);
		if (imn < imx) {
			__u32 t;
			if (pkt_dev->flags & F_IPSRC_RND)
				t = prandom_u32() % (imx - imn) + imn;
			else {
				t = ntohl(pkt_dev->cur_saddr);
				t++;
				if (t > imx)
					t = imn;

			}
			pkt_dev->cur_saddr = htonl(t);
		}

		if (pkt_dev->cflows && f_seen(pkt_dev, flow)) {
			/* Sticky destination: reuse the address recorded
			 * when this flow was first initialised.
			 */
			pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr;
		} else {
			imn = ntohl(pkt_dev->daddr_min);
			imx = ntohl(pkt_dev->daddr_max);
			if (imn < imx) {
				__u32 t;
				__be32 s;
				if (pkt_dev->flags & F_IPDST_RND) {

					/* Re-draw until the address is a
					 * plain unicast one.
					 */
					do {
						t = prandom_u32() %
							(imx - imn) + imn;
						s = htonl(t);
					} while (ipv4_is_loopback(s) ||
						ipv4_is_multicast(s) ||
						ipv4_is_lbcast(s) ||
						ipv4_is_zeronet(s) ||
						ipv4_is_local_multicast(s));
					pkt_dev->cur_daddr = s;
				} else {
					t = ntohl(pkt_dev->cur_daddr);
					t++;
					if (t > imx) {
						t = imn;
					}
					pkt_dev->cur_daddr = htonl(t);
				}
			}
			if (pkt_dev->cflows) {
				pkt_dev->flows[flow].flags |= F_INIT;
				pkt_dev->flows[flow].cur_daddr =
				    pkt_dev->cur_daddr;
#ifdef CONFIG_XFRM
				if (pkt_dev->flags & F_IPSEC)
					get_ipsec_sa(pkt_dev, flow);
#endif
				pkt_dev->nflows++;
			}
		}
	} else {		/* IPV6 * */

		if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) {
			int i;

			/* Only random destinations yet */

			for (i = 0; i < 4; i++) {
				pkt_dev->cur_in6_daddr.s6_addr32[i] =
				    (((__force __be32)prandom_u32() |
				      pkt_dev->min_in6_daddr.s6_addr32[i]) &
				     pkt_dev->max_in6_daddr.s6_addr32[i]);
			}
		}
	}

	/* Packet size: random or sequential within [min, max). */
	if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
		__u32 t;
		if (pkt_dev->flags & F_TXSIZE_RND) {
			t = prandom_u32() %
				(pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)
				+ pkt_dev->min_pkt_size;
		} else {
			t = pkt_dev->cur_pkt_size + 1;
			if (t > pkt_dev->max_pkt_size)
				t = pkt_dev->min_pkt_size;
		}
		pkt_dev->cur_pkt_size = t;
	}

	set_cur_queue_map(pkt_dev);

	pkt_dev->flows[flow].count++;
}


#ifdef CONFIG_XFRM
static u32 pktgen_dst_metrics[RTAX_MAX + 1] = {

	[RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */
};

/* Apply the flow's xfrm transformation (mode + type output) to skb and
 * charge it against the SA's lifetime counters.  Returns 0 on success
 * or when no transformation applies.
 */
static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
{
	struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
	int err = 0;
	struct net *net = dev_net(pkt_dev->odev);

	if (!x)
		return 0;
	/* XXX: we dont support tunnel mode for now until
	 * we resolve the dst issue */
	if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0))
		return 0;

	/* But when user specify an valid SPI, transformation
	 * supports both transport/tunnel mode + ESP/AH type.
 */
	if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;

	rcu_read_lock_bh();
	err = x->outer_mode->output(x, skb);
	rcu_read_unlock_bh();
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
		goto error;
	}
	err = x->type->output(x, skb);
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
		goto error;
	}
	/* Account the packet against the SA's lifetime limits. */
	spin_lock_bh(&x->lock);
	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
error:
	return err;
}

/* Release the xfrm_state references cached on all flows. */
static void free_SAs(struct pktgen_dev *pkt_dev)
{
	if (pkt_dev->cflows) {
		/* let go of the SAs if we have them */
		int i;
		for (i = 0; i < pkt_dev->cflows; i++) {
			struct xfrm_state *x = pkt_dev->flows[i].x;
			if (x) {
				xfrm_state_put(x);
				pkt_dev->flows[i].x = NULL;
			}
		}
	}
}

/* If F_IPSEC is set, transform the IPv4 payload of skb in place (link
 * header is temporarily pulled off) and fix up the IP length/checksum
 * afterwards.  Returns 1 on success (or when IPsec is disabled), 0 when
 * the skb had to be freed on error.
 */
static int process_ipsec(struct pktgen_dev *pkt_dev,
			      struct sk_buff *skb, __be16 protocol)
{
	if (pkt_dev->flags & F_IPSEC) {
		struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
		int nhead = 0;
		if (x) {
			struct ethhdr *eth;
			struct iphdr *iph;
			int ret;

			/* Make room for the SA's header expansion. */
			nhead = x->props.header_len - skb_headroom(skb);
			if (nhead > 0) {
				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
				if (ret < 0) {
					pr_err("Error expanding ipsec packet %d\n",
					       ret);
					goto err;
				}
			}

			/* ipsec is not expecting ll header */
			skb_pull(skb, ETH_HLEN);
			ret = pktgen_output_ipsec(skb, pkt_dev);
			if (ret) {
				pr_err("Error creating ipsec packet %d\n", ret);
				goto err;
			}
			/* restore ll */
			eth = skb_push(skb, ETH_HLEN);
			memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
			eth->h_proto = protocol;

			/* Update IPv4 header len as well as checksum value */
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - ETH_HLEN);
			ip_send_check(iph);
		}
	}
	return 1;
err:
	kfree_skb(skb);
	return 0;
}
#endif

/* Write the configured MPLS label stack, marking the last label with
 * the bottom-of-stack bit.
 */
static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
{
	unsigned int i;
	for (i = 0; i < pkt_dev->nr_labels; i++)
		*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;

	mpls--;
	*mpls |= MPLS_STACK_BOTTOM;
}

/* Pack VLAN id/CFI/priority into an 802.1Q TCI in network byte order. */
static inline __be16 build_tci(unsigned int id, unsigned int cfi,
			       unsigned int prio)
{
	return htons(id | (cfi << 12) | (prio << 13));
}

/* Append the pktgen header (magic, sequence number, timestamp) and pad
 * the packet to datalen, either linearly or spread over page fragments
 * when nfrags > 0.
 */
static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
				int datalen)
{
	struct timespec64 timestamp;
	struct pktgen_hdr *pgh;

	pgh = skb_put(skb, sizeof(*pgh));
	datalen -= sizeof(*pgh);

	if (pkt_dev->nfrags <= 0) {
		skb_put_zero(skb, datalen);
	} else {
		int frags = pkt_dev->nfrags;
		int i, len;
		int frag_len;


		if (frags > MAX_SKB_FRAGS)
			frags = MAX_SKB_FRAGS;
		/* Whatever exceeds frags*PAGE_SIZE stays linear. */
		len = datalen - frags * PAGE_SIZE;
		if (len > 0) {
			skb_put_zero(skb, len);
			datalen = frags * PAGE_SIZE;
		}

		i = 0;
		frag_len = (datalen/frags) < PAGE_SIZE ?
			   (datalen/frags) : PAGE_SIZE;
		while (datalen > 0) {
			if (unlikely(!pkt_dev->page)) {
				int node = numa_node_id();

				if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
					node = pkt_dev->node;
				pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
				if (!pkt_dev->page)
					break;
			}
			/* All fragments share the one cached zero page. */
			get_page(pkt_dev->page);
			skb_frag_set_page(skb, i, pkt_dev->page);
			skb_shinfo(skb)->frags[i].page_offset = 0;
			/*last fragment, fill rest of data*/
			if (i == (frags - 1))
				skb_frag_size_set(&skb_shinfo(skb)->frags[i],
				    (datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
			else
				skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
			datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
			i++;
			skb_shinfo(skb)->nr_frags = i;
		}
	}

	/* Stamp the time, and sequence number,
	 * convert them to network byte order
	 */
	pgh->pgh_magic = htonl(PKTGEN_MAGIC);
	pgh->seq_num = htonl(pkt_dev->seq_num);

	if (pkt_dev->flags & F_NO_TIMESTAMP) {
		pgh->tv_sec = 0;
		pgh->tv_usec = 0;
	} else {
		/*
		 * pgh->tv_sec wraps in y2106 when interpreted as unsigned
		 * as done by wireshark, or y2038 when interpreted as signed.
		 * This is probably harmless, but if anyone wants to improve
		 * it, we could introduce a variant that puts 64-bit nanoseconds
		 * into the respective header bytes.
		 * This would also be slightly faster to read.
		 */
		ktime_get_real_ts64(&timestamp);
		pgh->tv_sec = htonl(timestamp.tv_sec);
		pgh->tv_usec = htonl(timestamp.tv_nsec / NSEC_PER_USEC);
	}
}

/* Allocate an skb sized for the current packet plus slack, optionally
 * on a specific NUMA node (F_NODE).  Head room is reserved for the link
 * layer; the caller will skb_reserve(16) on top of it.
 */
static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
					struct pktgen_dev *pkt_dev)
{
	unsigned int extralen = LL_RESERVED_SPACE(dev);
	struct sk_buff *skb = NULL;
	unsigned int size;

	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
	if (pkt_dev->flags & F_NODE) {
		int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();

		skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
		if (likely(skb)) {
			skb_reserve(skb, NET_SKB_PAD);
			skb->dev = dev;
		}
	} else {
		 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
	}

	/* the caller pre-fetches from skb->data and reserves for the mac hdr */
	if (likely(skb))
		skb_reserve(skb, extralen - 16);

	return skb;
}

/* Build a complete IPv4/UDP pktgen packet: Ethernet (+optional MPLS and
 * VLAN/QinQ tags), IPv4 header, UDP header, payload and checksums.
 * Returns the skb or NULL on allocation/IPsec failure.
 */
static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
					struct pktgen_dev *pkt_dev)
{
	struct sk_buff *skb = NULL;
	__u8 *eth;
	struct udphdr *udph;
	int datalen, iplen;
	struct iphdr *iph;
	__be16 protocol = htons(ETH_P_IP);
	__be32 *mpls;
	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
	u16 queue_map;

	if (pkt_dev->nr_labels)
		protocol = htons(ETH_P_MPLS_UC);

	if (pkt_dev->vlan_id != 0xffff)
		protocol = htons(ETH_P_8021Q);

	/* Update any of the values, used when we're incrementing various
	 * fields.
 */
	mod_cur_headers(pkt_dev);
	queue_map = pkt_dev->cur_queue_map;

	skb = pktgen_alloc_skb(odev, pkt_dev);
	if (!skb) {
		sprintf(pkt_dev->result, "No memory");
		return NULL;
	}

	prefetchw(skb->data);
	skb_reserve(skb, 16);

	/*  Reserve for ethernet and IP header  */
	eth = skb_push(skb, 14);
	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
	if (pkt_dev->nr_labels)
		mpls_push(mpls, pkt_dev);

	if (pkt_dev->vlan_id != 0xffff) {
		/* Outer (S-)VLAN first when doing QinQ, then the inner tag. */
		if (pkt_dev->svlan_id != 0xffff) {
			svlan_tci = skb_put(skb, sizeof(__be16));
			*svlan_tci = build_tci(pkt_dev->svlan_id,
					       pkt_dev->svlan_cfi,
					       pkt_dev->svlan_p);
			svlan_encapsulated_proto = skb_put(skb,
							   sizeof(__be16));
			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
		}
		vlan_tci = skb_put(skb, sizeof(__be16));
		*vlan_tci = build_tci(pkt_dev->vlan_id,
				      pkt_dev->vlan_cfi,
				      pkt_dev->vlan_p);
		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
		*vlan_encapsulated_proto = htons(ETH_P_IP);
	}

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);
	iph = skb_put(skb, sizeof(struct iphdr));

	skb_set_transport_header(skb, skb->len);
	udph = skb_put(skb, sizeof(struct udphdr));
	skb_set_queue_mapping(skb, queue_map);
	skb->priority = pkt_dev->skb_priority;

	/* hh[] holds dst MAC (0..5) and src MAC (6..11). */
	memcpy(eth, pkt_dev->hh, 12);
	*(__be16 *) & eth[12] = protocol;

	/* Eth + IPh + UDPh + mpls */
	datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 -
		  pkt_dev->pkt_overhead;
	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr))
		datalen = sizeof(struct pktgen_hdr);

	udph->source = htons(pkt_dev->cur_udp_src);
	udph->dest = htons(pkt_dev->cur_udp_dst);
	udph->len = htons(datalen + 8);	/* DATA + udphdr */
	udph->check = 0;

	iph->ihl = 5;
	iph->version = 4;
	iph->ttl = 32;
	iph->tos = pkt_dev->tos;
	iph->protocol = IPPROTO_UDP;	/* UDP */
	iph->saddr = pkt_dev->cur_saddr;
	iph->daddr = pkt_dev->cur_daddr;
	iph->id = htons(pkt_dev->ip_id);
	pkt_dev->ip_id++;
	iph->frag_off = 0;
	iplen = 20 + 8 + datalen;
	iph->tot_len = htons(iplen);
	ip_send_check(iph);
	skb->protocol = protocol;
	skb->dev = odev;
	skb->pkt_type = PACKET_HOST;

	pktgen_finalize_skb(pkt_dev, skb, datalen);

	/* UDP checksum: none, hardware-offloaded, or computed in software. */
	if (!(pkt_dev->flags & F_UDPCSUM)) {
		skb->ip_summed = CHECKSUM_NONE;
	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		udp4_hwcsum(skb, iph->saddr, iph->daddr);
	} else {
		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);

		/* add protocol-dependent pseudo-header */
		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						datalen + 8, IPPROTO_UDP, csum);

		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
	}

#ifdef CONFIG_XFRM
	if (!process_ipsec(pkt_dev, skb, protocol))
		return NULL;
#endif

	return skb;
}

/* IPv6 counterpart of fill_packet_ipv4: build a complete IPv6/UDP
 * pktgen packet with optional MPLS and VLAN/QinQ tags.
 */
static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
					struct pktgen_dev *pkt_dev)
{
	struct sk_buff *skb = NULL;
	__u8 *eth;
	struct udphdr *udph;
	int datalen, udplen;
	struct ipv6hdr *iph;
	__be16 protocol = htons(ETH_P_IPV6);
	__be32 *mpls;
	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
	__be16 *vlan_encapsulated_proto = NULL;  /* packet type ID field (or len) for VLAN tag */
	__be16 *svlan_tci = NULL;                /* Encapsulates priority and SVLAN ID */
	__be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */
	u16 queue_map;

	if (pkt_dev->nr_labels)
		protocol = htons(ETH_P_MPLS_UC);

	if (pkt_dev->vlan_id != 0xffff)
		protocol = htons(ETH_P_8021Q);

	/* Update any of the values, used when we're incrementing various
	 * fields.
	 */
	mod_cur_headers(pkt_dev);
	queue_map = pkt_dev->cur_queue_map;

	skb = pktgen_alloc_skb(odev, pkt_dev);
	if (!skb) {
		sprintf(pkt_dev->result, "No memory");
		return NULL;
	}

	prefetchw(skb->data);
	skb_reserve(skb, 16);

	/*  Reserve for ethernet and IP header  */
	eth = skb_push(skb, 14);
	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
	if (pkt_dev->nr_labels)
		mpls_push(mpls, pkt_dev);

	if (pkt_dev->vlan_id != 0xffff) {
		if (pkt_dev->svlan_id != 0xffff) {
			svlan_tci = skb_put(skb, sizeof(__be16));
			*svlan_tci = build_tci(pkt_dev->svlan_id,
					       pkt_dev->svlan_cfi,
					       pkt_dev->svlan_p);
			svlan_encapsulated_proto = skb_put(skb,
							   sizeof(__be16));
			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
		}
		vlan_tci = skb_put(skb, sizeof(__be16));
		*vlan_tci = build_tci(pkt_dev->vlan_id,
				      pkt_dev->vlan_cfi,
				      pkt_dev->vlan_p);
		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
		*vlan_encapsulated_proto = htons(ETH_P_IPV6);
	}

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);
	iph = skb_put(skb, sizeof(struct ipv6hdr));

	skb_set_transport_header(skb, skb->len);
	udph = skb_put(skb, sizeof(struct udphdr));
	skb_set_queue_mapping(skb, queue_map);
	skb->priority = pkt_dev->skb_priority;

	memcpy(eth, pkt_dev->hh, 12);
	*(__be16 *) &eth[12] = protocol;

	/* Eth + IPh + UDPh + mpls */
	datalen = pkt_dev->cur_pkt_size - 14 -
		  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
		  pkt_dev->pkt_overhead;

	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
		datalen = sizeof(struct pktgen_hdr);
		net_info_ratelimited("increased datalen to %d\n", datalen);
	}

	udplen = datalen + sizeof(struct udphdr);
	udph->source = htons(pkt_dev->cur_udp_src);
	udph->dest = htons(pkt_dev->cur_udp_dst);
	udph->len = htons(udplen);
	udph->check = 0;

	*(__be32 *) iph = htonl(0x60000000);	/* Version + flow */

	if (pkt_dev->traffic_class) {
		/* Version + traffic class + flow (0) */
		*(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20));
	}

	iph->hop_limit = 32;

	iph->payload_len = htons(udplen);
	iph->nexthdr = IPPROTO_UDP;

	iph->daddr = pkt_dev->cur_in6_daddr;
	iph->saddr = pkt_dev->cur_in6_saddr;

	skb->protocol = protocol;
	skb->dev = odev;
	skb->pkt_type = PACKET_HOST;

	pktgen_finalize_skb(pkt_dev, skb, datalen);

	/* UDP checksum: none, hardware-offloaded, or computed in software. */
	if (!(pkt_dev->flags & F_UDPCSUM)) {
		skb->ip_summed = CHECKSUM_NONE;
	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
	} else {
		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);

		/* add protocol-dependent pseudo-header */
		udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);

		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
	}

	return skb;
}

/* Dispatch to the IPv4 or IPv6 packet builder based on F_IPV6. */
static struct sk_buff *fill_packet(struct net_device *odev,
				   struct pktgen_dev *pkt_dev)
{
	if (pkt_dev->flags & F_IPV6)
		return fill_packet_ipv6(odev, pkt_dev);
	else
		return fill_packet_ipv4(odev, pkt_dev);
}

/* Reset all per-run statistics counters. */
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev)
{
	pkt_dev->seq_num = 1;
	pkt_dev->idle_acc = 0;
	pkt_dev->sofar = 0;
	pkt_dev->tx_bytes = 0;
	pkt_dev->errors = 0;
}

/* Set up structure for sending pkts, clear counters */

static void pktgen_run(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev;
	int started = 0;

	func_enter();

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {

		/*
		 * setup odev and create initial packet.
		 */
		pktgen_setup_inject(pkt_dev);

		if (pkt_dev->odev) {
			pktgen_clear_counters(pkt_dev);
			pkt_dev->skb = NULL;
			pkt_dev->started_at = pkt_dev->next_tx = ktime_get();

			set_pkt_overhead(pkt_dev);

			strcpy(pkt_dev->result, "Starting");
			pkt_dev->running = 1;	/* Cranke yeself!
 */
			started++;
		} else
			strcpy(pkt_dev->result, "Error starting");
	}
	rcu_read_unlock();
	/* At least one device started: clear any pending stop request. */
	if (started)
		t->control &= ~(T_STOP);
}

/* Request every pktgen thread to stop its interfaces. */
static void pktgen_stop_all_threads_ifs(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= T_STOP;

	mutex_unlock(&pktgen_thread_lock);
}

/* Return 1 when any device on this thread is still transmitting. */
static int thread_is_running(const struct pktgen_thread *t)
{
	const struct pktgen_dev *pkt_dev;

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
		if (pkt_dev->running) {
			rcu_read_unlock();
			return 1;
		}
	rcu_read_unlock();
	return 0;
}

/* Sleep until the thread's devices finish; returns 0 if interrupted
 * by a signal, 1 otherwise.
 */
static int pktgen_wait_thread_run(struct pktgen_thread *t)
{
	while (thread_is_running(t)) {

		msleep_interruptible(100);

		if (signal_pending(current))
			goto signal;
	}
	return 1;
signal:
	return 0;
}

/* Wait for all threads to finish; on a signal, ask the remaining
 * threads to stop.  Returns the last wait status (0 = interrupted).
 */
static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
{
	struct pktgen_thread *t;
	int sig = 1;

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		sig = pktgen_wait_thread_run(t);
		if (sig == 0)
			break;
	}

	if (sig == 0)
		list_for_each_entry(t, &pn->pktgen_threads, th_list)
			t->control |= (T_STOP);

	mutex_unlock(&pktgen_thread_lock);
	return sig;
}

/* Kick off transmission on every thread and wait for completion. */
static void pktgen_run_all_threads(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= (T_RUN);

	mutex_unlock(&pktgen_thread_lock);

	/* Propagate thread->control */
	schedule_timeout_interruptible(msecs_to_jiffies(125));

	pktgen_wait_all_threads_run(pn);
}

/* Ask every thread to remove all of its devices, then wait. */
static void pktgen_reset_all_threads(struct pktgen_net *pn)
{
	struct pktgen_thread *t;

	func_enter();

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list)
		t->control |= (T_REMDEVALL);

	mutex_unlock(&pktgen_thread_lock);

	/* Propagate thread->control */
	schedule_timeout_interruptible(msecs_to_jiffies(125));

	pktgen_wait_all_threads_run(pn);
}

/* Format the run statistics (elapsed/idle time, pps, bandwidth,
 * errors) into pkt_dev->result for the procfs reader.
 */
static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
{
	__u64 bps, mbps, pps;
	char *p = pkt_dev->result;
	ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
				    pkt_dev->started_at);
	ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);

	p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
		     (unsigned long long)ktime_to_us(elapsed),
		     (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
		     (unsigned long long)ktime_to_us(idle),
		     (unsigned long long)pkt_dev->sofar,
		     pkt_dev->cur_pkt_size, nr_frags);

	pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
			ktime_to_ns(elapsed));

	bps = pps * 8 * pkt_dev->cur_pkt_size;

	mbps = bps;
	do_div(mbps, 1000000);
	p += sprintf(p, " %llupps %lluMb/sec (%llubps) errors: %llu",
		     (unsigned long long)pps,
		     (unsigned long long)mbps,
		     (unsigned long long)bps,
		     (unsigned long long)pkt_dev->errors);
}

/* Set stopped-at timer, remove from running list, do counters & statistics */
static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
{
	int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;

	if (!pkt_dev->running) {
		pr_warn("interface: %s is already stopped\n",
			pkt_dev->odevname);
		return -EINVAL;
	}

	pkt_dev->running = 0;
	kfree_skb(pkt_dev->skb);
	pkt_dev->skb = NULL;
	pkt_dev->stopped_at = ktime_get();

	show_results(pkt_dev, nr_frags);

	return 0;
}

/* Pick the running device with the earliest next_tx deadline. */
static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev, *best = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
		if (!pkt_dev->running)
			continue;
		if (best == NULL)
			best = pkt_dev;
		else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
			best = pkt_dev;
	}
	rcu_read_unlock();

	return best;
}

/* Stop every device on this thread. */
static void pktgen_stop(struct pktgen_thread *t)
{
	struct pktgen_dev *pkt_dev;

	func_enter();

	rcu_read_lock();

	list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
		pktgen_stop_device(pkt_dev);
	}

	rcu_read_unlock();
}

/*
 * one of our devices needs to be removed - find it
 * and remove it
 */
static void pktgen_rem_one_if(struct pktgen_thread *t)
{
	struct list_head *q, *n;
	struct pktgen_dev *cur;

	func_enter();

	list_for_each_safe(q, n, &t->if_list) {
		cur = list_entry(q, struct pktgen_dev, list);

		if (!cur->removal_mark)
			continue;

		kfree_skb(cur->skb);
		cur->skb = NULL;

		pktgen_remove_device(t, cur);

		break;
	}
}

/* Remove every device on this thread (used at thread teardown). */
static void pktgen_rem_all_ifs(struct pktgen_thread *t)
{
	struct list_head *q, *n;
	struct pktgen_dev *cur;

	func_enter();

	/* Remove all devices, free mem */

	list_for_each_safe(q, n, &t->if_list) {
		cur = list_entry(q, struct pktgen_dev, list);

		kfree_skb(cur->skb);
		cur->skb = NULL;

		pktgen_remove_device(t, cur);
	}
}

static void pktgen_rem_thread(struct pktgen_thread *t)
{
	/* Remove from the thread list */
	remove_proc_entry(t->tsk->comm, t->net->proc_dir);
}

/* Yield the CPU, accounting the time away into idle_acc. */
static void pktgen_resched(struct pktgen_dev *pkt_dev)
{
	ktime_t idle_start = ktime_get();
	schedule();
	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
}

/* Busy-wait (rescheduling as needed) until we hold the only reference
 * to pkt_dev->skb, i.e. the stack has finished with it.
 */
static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
{
	ktime_t idle_start = ktime_get();

	while (refcount_read(&(pkt_dev->skb->users)) != 1) {
		if (signal_pending(current))
			break;

		if (need_resched())
			pktgen_resched(pkt_dev);
		else
			cpu_relax();
	}
	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start));
}

/* Transmit one burst for pkt_dev via the configured xmit mode
 * (netif_receive, queue_xmit, or direct driver start_xmit), updating
 * counters and stopping the device once its packet count is reached.
 */
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
	unsigned int burst = READ_ONCE(pkt_dev->burst);
	struct net_device *odev = pkt_dev->odev;
	struct netdev_queue *txq;
	struct sk_buff *skb;
	int ret;

	/* If device is offline, then don't send */
	if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
		pktgen_stop_device(pkt_dev);
		return;
	}

	/* This is max DELAY, this has special meaning of
	 * "never transmit"
	 */
	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
		pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX);
		return;
	}

	/* If no skb or clone count exhausted then get new one */
	if (!pkt_dev->skb || (pkt_dev->last_ok &&
			      ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
		/* build a new pkt */
		kfree_skb(pkt_dev->skb);

		pkt_dev->skb = fill_packet(odev, pkt_dev);
		if (pkt_dev->skb == NULL) {
			pr_err("ERROR: couldn't
allocate skb in fill_packet\n");
			schedule();
			pkt_dev->clone_count--;	/* back out increment, OOM */
			return;
		}
		pkt_dev->last_pkt_size = pkt_dev->skb->len;
		pkt_dev->clone_count = 0;	/* reset counter */
	}

	if (pkt_dev->delay && pkt_dev->last_ok)
		spin(pkt_dev, pkt_dev->next_tx);

	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
		/* Inject into the local RX path instead of transmitting. */
		skb = pkt_dev->skb;
		skb->protocol = eth_type_trans(skb, skb->dev);
		refcount_add(burst, &skb->users);
		local_bh_disable();
		do {
			ret = netif_receive_skb(skb);
			if (ret == NET_RX_DROP)
				pkt_dev->errors++;
			pkt_dev->sofar++;
			pkt_dev->seq_num++;
			if (refcount_read(&skb->users) != burst) {
				/* skb was queued by rps/rfs or taps,
				 * so cannot reuse this skb
				 */
				WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
				/* get out of the loop and wait
				 * until skb is consumed
				 */
				break;
			}
			/* skb was 'freed' by stack, so clean few
			 * bits and reuse it
			 */
			skb_reset_tc(skb);
		} while (--burst > 0);
		goto out; /* Skips xmit_mode M_START_XMIT */
	} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
		/* Go through the full qdisc path. */
		local_bh_disable();
		refcount_inc(&pkt_dev->skb->users);

		ret = dev_queue_xmit(pkt_dev->skb);
		switch (ret) {
		case NET_XMIT_SUCCESS:
			pkt_dev->sofar++;
			pkt_dev->seq_num++;
			pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
			break;
		case NET_XMIT_DROP:
		case NET_XMIT_CN:
		/* These are all valid return codes for a qdisc but
		 * indicate packets are being dropped or will likely
		 * be dropped soon.
		 */
		case NETDEV_TX_BUSY:
		/* qdisc may call dev_hard_start_xmit directly in cases
		 * where no queues exist e.g. loopback device, virtual
		 * devices, etc. In this case we need to handle
		 * NETDEV_TX_ codes.
		 */
		default:
			pkt_dev->errors++;
			net_info_ratelimited("%s xmit error: %d\n",
					    pkt_dev->odevname, ret);
			break;
		}
		goto out;
	}

	/* Default M_START_XMIT mode: hand the skb straight to the driver. */
	txq = skb_get_tx_queue(odev, pkt_dev->skb);

	local_bh_disable();

	HARD_TX_LOCK(odev, txq, smp_processor_id());

	if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) {
		ret = NETDEV_TX_BUSY;
		pkt_dev->last_ok = 0;
		goto unlock;
	}
	refcount_add(burst, &pkt_dev->skb->users);

xmit_more:
	ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);

	switch (ret) {
	case NETDEV_TX_OK:
		pkt_dev->last_ok = 1;
		pkt_dev->sofar++;
		pkt_dev->seq_num++;
		pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
		if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq))
			goto xmit_more;
		break;
	case NET_XMIT_DROP:
	case NET_XMIT_CN:
		/* skb has been consumed */
		pkt_dev->errors++;
		break;
	default: /* Drivers are not supposed to return other values! */
		net_info_ratelimited("%s xmit error: %d\n",
				     pkt_dev->odevname, ret);
		pkt_dev->errors++;
		/* fallthru */
	case NETDEV_TX_BUSY:
		/* Retry it next time */
		refcount_dec(&(pkt_dev->skb->users));
		pkt_dev->last_ok = 0;
	}
	/* Drop the references for the part of the burst not sent. */
	if (unlikely(burst))
		WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
unlock:
	HARD_TX_UNLOCK(odev, txq);

out:
	local_bh_enable();

	/* If pkt_dev->count is zero, then run forever */
	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
		pktgen_wait_for_skb(pkt_dev);

		/* Done with this */
		pktgen_stop_device(pkt_dev);
	}
}

/*
 * Main loop of the thread goes here
 */

static int pktgen_thread_worker(void *arg)
{
	DEFINE_WAIT(wait);
	struct pktgen_thread *t = arg;
	struct pktgen_dev *pkt_dev = NULL;
	int cpu = t->cpu;

	/* Worker is expected to be pinned to its CPU. */
	BUG_ON(smp_processor_id() != cpu);

	init_waitqueue_head(&t->queue);
	complete(&t->start_done);

	pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));

	set_freezable();

	while (!kthread_should_stop()) {
		pkt_dev = next_to_run(t);

		/* Nothing to do and no control request: sleep briefly. */
		if (unlikely(!pkt_dev && t->control == 0)) {
			if (t->net->pktgen_exiting)
				break;
			wait_event_interruptible_timeout(t->queue,
							 t->control != 0,
							 HZ/10);
			try_to_freeze();
			continue;
		}

		if (likely(pkt_dev)) {
			pktgen_xmit(pkt_dev);

			if (need_resched())
				pktgen_resched(pkt_dev);
			else
				cpu_relax();
		}

		/* Process control requests posted by the procfs writers. */
		if (t->control & T_STOP) {
			pktgen_stop(t);
			t->control &= ~(T_STOP);
		}

		if (t->control & T_RUN) {
			pktgen_run(t);
			t->control &= ~(T_RUN);
		}

		if (t->control & T_REMDEVALL) {
			pktgen_rem_all_ifs(t);
			t->control &= ~(T_REMDEVALL);
		}

		if (t->control & T_REMDEV) {
			pktgen_rem_one_if(t);
			t->control &= ~(T_REMDEV);
		}

		try_to_freeze();
	}

	pr_debug("%s stopping all device\n", t->tsk->comm);
	pktgen_stop(t);

	pr_debug("%s removing all device\n", t->tsk->comm);
	pktgen_rem_all_ifs(t);

	pr_debug("%s removing thread\n", t->tsk->comm);
	pktgen_rem_thread(t);

	return 0;
}

/* Find a pktgen_dev on this thread by name.  With exact == false a
 * name is also matched when the stored name continues with '@'
 * (device aliases).  Returns the device or NULL.
 */
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
					  const char *ifname, bool exact)
{
	struct pktgen_dev *p, *pkt_dev = NULL;
	size_t len = strlen(ifname);

	rcu_read_lock();
	list_for_each_entry_rcu(p, &t->if_list, list)
		if (strncmp(p->odevname, ifname, len) == 0) {
			if (p->odevname[len]) {
				if (exact || p->odevname[len] != '@')
					continue;
			}
			pkt_dev = p;
			break;
		}

	rcu_read_unlock();
	pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
	return pkt_dev;
}

/*
 * Adds a dev at front of if_list.
 */
static int add_dev_to_thread(struct pktgen_thread *t,
			     struct pktgen_dev *pkt_dev)
{
	int rv = 0;

	/* This function cannot be called concurrently, as its called
	 * under pktgen_thread_lock mutex, but it can run from
	 * userspace on another CPU than the kthread. The if_lock()
	 * is used here to sync with concurrent instances of
	 * _rem_dev_from_if_list() invoked via kthread, which is also
	 * updating the if_list */
	if_lock(t);

	if (pkt_dev->pg_thread) {
		pr_err("ERROR: already assigned to a thread\n");
		rv = -EBUSY;
		goto out;
	}

	pkt_dev->running = 0;
	pkt_dev->pg_thread = t;
	list_add_rcu(&pkt_dev->list, &t->if_list);

out:
	if_unlock(t);
	return rv;
}

/* Called under thread lock */

static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
{
	struct pktgen_dev *pkt_dev;
	int err;
	int node = cpu_to_node(t->cpu);

	/* We don't allow a device to be on several threads */

	pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
	if (pkt_dev) {
		pr_err("ERROR: interface already used\n");
		return -EBUSY;
	}

	pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
	if (!pkt_dev)
		return -ENOMEM;

	strcpy(pkt_dev->odevname, ifname);
	pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
				      node);
	if (pkt_dev->flows == NULL) {
		kfree(pkt_dev);
		return -ENOMEM;
	}

	/* Defaults: one packet per skb, port 9 (discard) both ways,
	 * no VLAN tagging (0xffff = disabled), no NUMA pinning.
	 */
	pkt_dev->removal_mark = 0;
	pkt_dev->nfrags = 0;
	pkt_dev->delay = pg_delay_d;
	pkt_dev->count = pg_count_d;
	pkt_dev->sofar = 0;
	pkt_dev->udp_src_min = 9;	/* sink port */
	pkt_dev->udp_src_max = 9;
	pkt_dev->udp_dst_min = 9;
	pkt_dev->udp_dst_max = 9;
	pkt_dev->vlan_p = 0;
	pkt_dev->vlan_cfi = 0;
	pkt_dev->vlan_id = 0xffff;
	pkt_dev->svlan_p = 0;
	pkt_dev->svlan_cfi = 0;
	pkt_dev->svlan_id = 0xffff;
	pkt_dev->burst = 1;
	pkt_dev->node = -1;

	err = pktgen_setup_dev(t->net, pkt_dev, ifname);
	if (err)
		goto out1;
	if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
		pkt_dev->clone_skb = pg_clone_skb_d;

	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
					  &pktgen_if_fops, pkt_dev);
	if (!pkt_dev->entry) {
		pr_err("cannot create %s/%s procfs entry\n",
		       PG_PROC_DIR, ifname);
		err = -EINVAL;
		goto out2;
	}
#ifdef CONFIG_XFRM
	pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
	pkt_dev->ipsproto = IPPROTO_ESP;

	/* xfrm tunnel mode needs additional dst to extract outter
	 * ip header protocol/ttl/id
field, here creat a phony one. * instead of looking for a valid rt, which definitely hurting * performance under such circumstance. */ pkt_dev->dstops.family = AF_INET; pkt_dev->xdst.u.dst.dev = pkt_dev->odev; dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false); pkt_dev->xdst.child = &pkt_dev->xdst.u.dst; pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops; #endif return add_dev_to_thread(t, pkt_dev); out2: dev_put(pkt_dev->odev); out1: #ifdef CONFIG_XFRM free_SAs(pkt_dev); #endif vfree(pkt_dev->flows); kfree(pkt_dev); return err; } static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn) { struct pktgen_thread *t; struct proc_dir_entry *pe; struct task_struct *p; t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL, cpu_to_node(cpu)); if (!t) { pr_err("ERROR: out of memory, can't create new thread\n"); return -ENOMEM; } mutex_init(&t->if_lock); t->cpu = cpu; INIT_LIST_HEAD(&t->if_list); list_add_tail(&t->th_list, &pn->pktgen_threads); init_completion(&t->start_done); p = kthread_create_on_node(pktgen_thread_worker, t, cpu_to_node(cpu), "kpktgend_%d", cpu); if (IS_ERR(p)) { pr_err("kernel_thread() failed for cpu %d\n", t->cpu); list_del(&t->th_list); kfree(t); return PTR_ERR(p); } kthread_bind(p, cpu); t->tsk = p; pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir, &pktgen_thread_fops, t); if (!pe) { pr_err("cannot create %s/%s procfs entry\n", PG_PROC_DIR, t->tsk->comm); kthread_stop(p); list_del(&t->th_list); kfree(t); return -EINVAL; } t->net = pn; get_task_struct(p); wake_up_process(p); wait_for_completion(&t->start_done); return 0; } /* * Removes a device from the thread if_list. 
*/ static void _rem_dev_from_if_list(struct pktgen_thread *t, struct pktgen_dev *pkt_dev) { struct list_head *q, *n; struct pktgen_dev *p; if_lock(t); list_for_each_safe(q, n, &t->if_list) { p = list_entry(q, struct pktgen_dev, list); if (p == pkt_dev) list_del_rcu(&p->list); } if_unlock(t); } static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_dev) { pr_debug("remove_device pkt_dev=%p\n", pkt_dev); if (pkt_dev->running) { pr_warn("WARNING: trying to remove a running interface, stopping it now\n"); pktgen_stop_device(pkt_dev); } /* Dis-associate from the interface */ if (pkt_dev->odev) { dev_put(pkt_dev->odev); pkt_dev->odev = NULL; } /* Remove proc before if_list entry, because add_device uses * list to determine if interface already exist, avoid race * with proc_create_data() */ proc_remove(pkt_dev->entry); /* And update the thread if_list */ _rem_dev_from_if_list(t, pkt_dev); #ifdef CONFIG_XFRM free_SAs(pkt_dev); #endif vfree(pkt_dev->flows); if (pkt_dev->page) put_page(pkt_dev->page); kfree_rcu(pkt_dev, rcu); return 0; } static int __net_init pg_net_init(struct net *net) { struct pktgen_net *pn = net_generic(net, pg_net_id); struct proc_dir_entry *pe; int cpu, ret = 0; pn->net = net; INIT_LIST_HEAD(&pn->pktgen_threads); pn->pktgen_exiting = false; pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net); if (!pn->proc_dir) { pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR); return -ENODEV; } pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_fops); if (pe == NULL) { pr_err("cannot create %s procfs entry\n", PGCTRL); ret = -EINVAL; goto remove; } for_each_online_cpu(cpu) { int err; err = pktgen_create_thread(cpu, pn); if (err) pr_warn("Cannot create thread for cpu %d (%d)\n", cpu, err); } if (list_empty(&pn->pktgen_threads)) { pr_err("Initialization failed for all threads\n"); ret = -ENODEV; goto remove_entry; } return 0; remove_entry: remove_proc_entry(PGCTRL, pn->proc_dir); remove: remove_proc_entry(PG_PROC_DIR, 
pn->net->proc_net); return ret; } static void __net_exit pg_net_exit(struct net *net) { struct pktgen_net *pn = net_generic(net, pg_net_id); struct pktgen_thread *t; struct list_head *q, *n; LIST_HEAD(list); /* Stop all interfaces & threads */ pn->pktgen_exiting = true; mutex_lock(&pktgen_thread_lock); list_splice_init(&pn->pktgen_threads, &list); mutex_unlock(&pktgen_thread_lock); list_for_each_safe(q, n, &list) { t = list_entry(q, struct pktgen_thread, th_list); list_del(&t->th_list); kthread_stop(t->tsk); put_task_struct(t->tsk); kfree(t); } remove_proc_entry(PGCTRL, pn->proc_dir); remove_proc_entry(PG_PROC_DIR, pn->net->proc_net); } static struct pernet_operations pg_net_ops = { .init = pg_net_init, .exit = pg_net_exit, .id = &pg_net_id, .size = sizeof(struct pktgen_net), }; static int __init pg_init(void) { int ret = 0; pr_info("%s", version); ret = register_pernet_subsys(&pg_net_ops); if (ret) return ret; ret = register_netdevice_notifier(&pktgen_notifier_block); if (ret) unregister_pernet_subsys(&pg_net_ops); return ret; } static void __exit pg_cleanup(void) { unregister_netdevice_notifier(&pktgen_notifier_block); unregister_pernet_subsys(&pg_net_ops); /* Don't need rcu_barrier() due to use of kfree_rcu() */ } module_init(pg_init); module_exit(pg_cleanup); MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>"); MODULE_DESCRIPTION("Packet Generator tool"); MODULE_LICENSE("GPL"); MODULE_VERSION(VERSION); module_param(pg_count_d, int, 0); MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject"); module_param(pg_delay_d, int, 0); MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)"); module_param(pg_clone_skb_d, int, 0); MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet"); module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");
gpl-2.0
gimsoodong/tbb
src/test/test_combinable.cpp
18
14082
/* Copyright 2005-2015 Intel Corporation. All Rights Reserved. This file is part of Threading Building Blocks. Threading Building Blocks is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation. Threading Building Blocks is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Threading Building Blocks; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA As a special exception, you may use this file as part of a free software library without restriction. Specifically, if other files instantiate templates or use macros or inline functions from this file, or you compile this file and link it with other files to produce an executable, this file does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. 
*/ #define HARNESS_DEFAULT_MIN_THREADS 0 #define HARNESS_DEFAULT_MAX_THREADS 4 #define __TBB_EXTRA_DEBUG 1 // for concurrent_hash_map #include "tbb/combinable.h" #include "tbb/task_scheduler_init.h" #include "tbb/parallel_for.h" #include "tbb/blocked_range.h" #include "tbb/tick_count.h" #include "tbb/tbb_allocator.h" #include "tbb/tbb_thread.h" #if !TBB_USE_EXCEPTIONS && _MSC_VER // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers #pragma warning (push) #pragma warning (disable: 4530) #endif #include <cstring> #include <vector> #include <utility> #if !TBB_USE_EXCEPTIONS && _MSC_VER #pragma warning (pop) #endif #include "harness_assert.h" #include "harness.h" #if __TBB_GCC_WARNING_SUPPRESSION_PRESENT #pragma GCC diagnostic ignored "-Wuninitialized" #endif static tbb::atomic<int> construction_counter; static tbb::atomic<int> destruction_counter; const int REPETITIONS = 10; const int N = 100000; const double EXPECTED_SUM = (REPETITIONS + 1) * N; // // A minimal class // Define: default and copy constructor, and allow implicit operator& // also operator= // class minimal { private: int my_value; public: minimal(int val=0) : my_value(val) { ++construction_counter; } minimal( const minimal &m ) : my_value(m.my_value) { ++construction_counter; } minimal& operator=(const minimal& other) { my_value = other.my_value; return *this; } minimal& operator+=(const minimal& other) { my_value += other.my_value; return *this; } operator int() const { return my_value; } ~minimal() { ++destruction_counter; } void set_value( const int i ) { my_value = i; } int value( ) const { return my_value; } }; //// functors for initialization and combine // Addition template <typename T> struct FunctorAddFinit { T operator()() { return 0; } }; template <typename T> struct FunctorAddFinit7 { T operator()() { return 7; } }; template <typename T> struct FunctorAddCombine { T operator()(T left, T right ) const { return left + right; } }; template 
<typename T> struct FunctorAddCombineRef { T operator()(const T& left, const T& right ) const { return left + right; } }; template <typename T> T my_finit( ) { return 0; } template <typename T> T my_combine( T left, T right) { return left + right; } template <typename T> T my_combine_ref( const T &left, const T &right) { return left + right; } template <typename T> class CombineEachHelper { public: CombineEachHelper(T& _result) : my_result(_result) {} void operator()(const T& new_bit) { my_result += new_bit; } CombineEachHelper& operator=(const CombineEachHelper& other) { my_result = other; return *this; } private: T& my_result; }; template <typename T> class CombineEachHelperCnt { public: CombineEachHelperCnt(T& _result, int& _nbuckets) : my_result(_result), nBuckets(_nbuckets) {} void operator()(const T& new_bit) { my_result += new_bit; ++nBuckets; } CombineEachHelperCnt& operator=(const CombineEachHelperCnt& other) { my_result = other.my_result; nBuckets = other.nBuckets; return *this; } private: T& my_result; int& nBuckets; }; template <typename T> class CombineEachVectorHelper { public: typedef std::vector<T, tbb::tbb_allocator<T> > ContainerType; CombineEachVectorHelper(T& _result) : my_result(_result) { } void operator()(const ContainerType& new_bit) { for(typename ContainerType::const_iterator ci = new_bit.begin(); ci != new_bit.end(); ++ci) { my_result += *ci; } } CombineEachVectorHelper& operator=(const CombineEachVectorHelper& other) { my_result=other.my_result; return *this;} private: T& my_result; }; //// end functors template< typename T > void run_serial_scalar_tests(const char *test_name) { tbb::tick_count t0; T sum = 0; REMARK("Testing serial %s... 
", test_name); for (int t = -1; t < REPETITIONS; ++t) { if (Verbose && t == 0) t0 = tbb::tick_count::now(); for (int i = 0; i < N; ++i) { sum += 1; } } double ResultValue = sum; ASSERT( EXPECTED_SUM == ResultValue, NULL); REMARK("done\nserial %s, 0, %g, %g\n", test_name, ResultValue, ( tbb::tick_count::now() - t0).seconds()); } template <typename T> class ParallelScalarBody: NoAssign { tbb::combinable<T> &sums; public: ParallelScalarBody ( tbb::combinable<T> &_sums ) : sums(_sums) { } void operator()( const tbb::blocked_range<int> &r ) const { for (int i = r.begin(); i != r.end(); ++i) { bool was_there; T& my_local = sums.local(was_there); if(!was_there) my_local = 0; my_local += 1 ; } } }; // parallel body with no test for first access. template <typename T> class ParallelScalarBodyNoInit: NoAssign { tbb::combinable<T> &sums; public: ParallelScalarBodyNoInit ( tbb::combinable<T> &_sums ) : sums(_sums) { } void operator()( const tbb::blocked_range<int> &r ) const { for (int i = r.begin(); i != r.end(); ++i) { sums.local() += 1 ; } } }; template< typename T > void RunParallelScalarTests(const char *test_name) { tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); for (int p = MinThread; p <= MaxThread; ++p) { if (p == 0) continue; REMARK("Testing parallel %s on %d thread(s)... 
", test_name, p); init.initialize(p); tbb::tick_count t0; T assign_sum(0); T combine_sum(0); T combine_ref_sum(0); T combine_each_sum(0); T combine_finit_sum(0); for (int t = -1; t < REPETITIONS; ++t) { if (Verbose && t == 0) t0 = tbb::tick_count::now(); tbb::combinable<T> sums; FunctorAddFinit<T> my_finit_decl; tbb::combinable<T> finit_combinable(my_finit_decl); tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), ParallelScalarBodyNoInit<T>( finit_combinable ) ); tbb::parallel_for( tbb::blocked_range<int>( 0, N, 10000 ), ParallelScalarBody<T>( sums ) ); // Use combine combine_sum += sums.combine(my_combine<T>); combine_ref_sum += sums.combine(my_combine_ref<T>); CombineEachHelper<T> my_helper(combine_each_sum); sums.combine_each(my_helper); // test assignment tbb::combinable<T> assigned; assigned = sums; assign_sum += assigned.combine(my_combine<T>); combine_finit_sum += finit_combinable.combine(my_combine<T>); } ASSERT( EXPECTED_SUM == combine_sum, NULL); ASSERT( EXPECTED_SUM == combine_ref_sum, NULL); ASSERT( EXPECTED_SUM == assign_sum, NULL); ASSERT( EXPECTED_SUM == combine_finit_sum, NULL); REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, static_cast<double>(combine_sum), ( tbb::tick_count::now() - t0).seconds()); init.terminate(); } } template <typename T> class ParallelVectorForBody: NoAssign { tbb::combinable< std::vector<T, tbb::tbb_allocator<T> > > &locals; public: ParallelVectorForBody ( tbb::combinable< std::vector<T, tbb::tbb_allocator<T> > > &_locals ) : locals(_locals) { } void operator()( const tbb::blocked_range<int> &r ) const { T one = 1; for (int i = r.begin(); i < r.end(); ++i) { locals.local().push_back( one ); } } }; template< typename T > void RunParallelVectorTests(const char *test_name) { tbb::tick_count t0; tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); typedef std::vector<T, tbb::tbb_allocator<T> > ContainerType; for (int p = MinThread; p <= MaxThread; ++p) { if (p == 0) continue; REMARK("Testing 
parallel %s on %d thread(s)... ", test_name, p); init.initialize(p); T sum = 0; T sum2 = 0; T sum3 = 0; for (int t = -1; t < REPETITIONS; ++t) { if (Verbose && t == 0) t0 = tbb::tick_count::now(); typedef typename tbb::combinable< ContainerType > CombinableType; CombinableType vs; tbb::parallel_for ( tbb::blocked_range<int> (0, N, 10000), ParallelVectorForBody<T>( vs ) ); // copy construct CombinableType vs2(vs); // this causes an assertion failure, related to allocators... // assign CombinableType vs3; vs3 = vs; CombineEachVectorHelper<T> MyCombineEach(sum); vs.combine_each(MyCombineEach); CombineEachVectorHelper<T> MyCombineEach2(sum2); vs2.combine_each(MyCombineEach2); CombineEachVectorHelper<T> MyCombineEach3(sum3); vs2.combine_each(MyCombineEach3); // combine_each sums all elements of each vector into the result. } double ResultValue = sum; ASSERT( EXPECTED_SUM == ResultValue, NULL); ResultValue = sum2; ASSERT( EXPECTED_SUM == ResultValue, NULL); ResultValue = sum3; ASSERT( EXPECTED_SUM == ResultValue, NULL); REMARK("done\nparallel %s, %d, %g, %g\n", test_name, p, ResultValue, ( tbb::tick_count::now() - t0).seconds()); init.terminate(); } } #include "harness_barrier.h" Harness::SpinBarrier sBarrier; struct Body : NoAssign { tbb::combinable<int>* locals; const int nthread; const int nIters; Body( int nthread_, int niters_ ) : nthread(nthread_), nIters(niters_) { sBarrier.initialize(nthread_); } void operator()(int thread_id ) const { bool existed; sBarrier.wait(); for(int i = 0; i < nIters; ++i ) { existed = thread_id & 1; int oldval = locals->local(existed); ASSERT(existed == (i > 0), "Error on first reference"); ASSERT(!existed || (oldval == thread_id), "Error on fetched value"); existed = thread_id & 1; locals->local(existed) = thread_id; ASSERT(existed, "Error on assignment"); } } }; void TestLocalAllocations( int nthread ) { ASSERT(nthread > 0, "nthread must be positive"); #define NITERATIONS 1000 Body myBody(nthread, NITERATIONS); tbb::combinable<int> 
myCombinable; myBody.locals = &myCombinable; NativeParallelFor( nthread, myBody ); int mySum = 0; int mySlots = 0; CombineEachHelperCnt<int> myCountCombine(mySum, mySlots); myCombinable.combine_each(myCountCombine); ASSERT(nthread == mySlots, "Incorrect number of slots"); ASSERT(mySum == (nthread - 1) * nthread / 2, "Incorrect values in result"); } void RunParallelTests() { RunParallelScalarTests<int>("int"); RunParallelScalarTests<double>("double"); RunParallelScalarTests<minimal>("minimal"); RunParallelVectorTests<int>("std::vector<int, tbb::tbb_allocator<int> >"); RunParallelVectorTests<double>("std::vector<double, tbb::tbb_allocator<double> >"); } template <typename T> void RunAssignmentAndCopyConstructorTest(const char *test_name) { REMARK("Testing assignment and copy construction for %s\n", test_name); // test creation with finit function (combine returns finit return value if no threads have created locals) FunctorAddFinit7<T> my_finit7_decl; tbb::combinable<T> create2(my_finit7_decl); ASSERT(7 == create2.combine(my_combine<T>), NULL); // test copy construction with function initializer tbb::combinable<T> copy2(create2); ASSERT(7 == copy2.combine(my_combine<T>), NULL); // test copy assignment with function initializer FunctorAddFinit<T> my_finit_decl; tbb::combinable<T> assign2(my_finit_decl); assign2 = create2; ASSERT(7 == assign2.combine(my_combine<T>), NULL); } void RunAssignmentAndCopyConstructorTests() { REMARK("Running assignment and copy constructor tests\n"); RunAssignmentAndCopyConstructorTest<int>("int"); RunAssignmentAndCopyConstructorTest<double>("double"); RunAssignmentAndCopyConstructorTest<minimal>("minimal"); } int TestMain () { if (MaxThread > 0) { RunParallelTests(); } RunAssignmentAndCopyConstructorTests(); for(int i = 1 <= MinThread ? MinThread : 1; i <= MaxThread; ++i) { REMARK("Testing local() allocation with nthreads=%d\n", i); for(int j = 0; j < 100; ++j) { TestLocalAllocations(i); } } return Harness::Done; }
gpl-2.0
unicell/redpatch
drivers/scsi/scsi_debug.c
18
121208
/* * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv * Copyright (C) 1992 Eric Youngdale * Simulate a host adapter with 2 disks attached. Do a lot of checking * to make sure that we are not getting blocks mixed up, and PANIC if * anything out of the ordinary is seen. * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * * This version is more generic, simulating a variable number of disk * (or disk like devices) sharing a common amount of RAM. To be more * realistic, the simulated devices have the transport attributes of * SAS disks. * * * For documentation see http://sg.danny.cz/sg/sdebug26.html * * D. Gilbert (dpg) work for Magneto-Optical device test [20010421] * dpg: work for devfs large number of disks [20010809] * forked for lk 2.5 series [20011216, 20020101] * use vmalloc() more inquiry+mode_sense [20020302] * add timers for delayed responses [20020721] * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031] * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118] * dpg: change style of boot options to "scsi_debug.num_tgts=2" and * module options to "modprobe scsi_debug num_tgts=2" [20021221] */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/genhd.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/vmalloc.h> #include <linux/moduleparam.h> #include <linux/scatterlist.h> #include <linux/blkdev.h> #include <linux/crc-t10dif.h> #include <net/checksum.h> #include <asm/unaligned.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsicam.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dbg.h> #include "sd.h" #include "scsi_logging.h" #define SCSI_DEBUG_VERSION "1.82" static const char * scsi_debug_version_date = "20100324"; /* Additional Sense Code (ASC) */ 
#define NO_ADDITIONAL_SENSE 0x0 #define LOGICAL_UNIT_NOT_READY 0x4 #define UNRECOVERED_READ_ERR 0x11 #define PARAMETER_LIST_LENGTH_ERR 0x1a #define INVALID_OPCODE 0x20 #define ADDR_OUT_OF_RANGE 0x21 #define INVALID_COMMAND_OPCODE 0x20 #define INVALID_FIELD_IN_CDB 0x24 #define INVALID_FIELD_IN_PARAM_LIST 0x26 #define POWERON_RESET 0x29 #define SAVING_PARAMS_UNSUP 0x39 #define TRANSPORT_PROBLEM 0x4b #define THRESHOLD_EXCEEDED 0x5d #define LOW_POWER_COND_ON 0x5e /* Additional Sense Code Qualifier (ASCQ) */ #define ACK_NAK_TO 0x3 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ /* Default values for driver parameters */ #define DEF_NUM_HOST 1 #define DEF_NUM_TGTS 1 #define DEF_MAX_LUNS 1 /* With these defaults, this driver will make 1 host with 1 target * (id 0) containing 1 logical unit (lun 0). That is 1 device. */ #define DEF_ATO 1 #define DEF_DELAY 1 #define DEF_DEV_SIZE_MB 8 #define DEF_DIF 0 #define DEF_DIX 0 #define DEF_D_SENSE 0 #define DEF_EVERY_NTH 0 #define DEF_FAKE_RW 0 #define DEF_GUARD 0 #define DEF_LBPU 0 #define DEF_LBPWS 0 #define DEF_LBPWS10 0 #define DEF_LOWEST_ALIGNED 0 #define DEF_NO_LUN_0 0 #define DEF_NUM_PARTS 0 #define DEF_OPTS 0 #define DEF_OPT_BLKS 64 #define DEF_PHYSBLK_EXP 0 #define DEF_PTYPE 0 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ #define DEF_SECTOR_SIZE 512 #define DEF_UNMAP_ALIGNMENT 0 #define DEF_UNMAP_GRANULARITY 1 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF #define DEF_UNMAP_MAX_DESC 256 #define DEF_VIRTUAL_GB 0 #define DEF_VPD_USE_HOSTNO 1 #define DEF_WRITESAME_LENGTH 0xFFFF /* bit mask values for scsi_debug_opts */ #define SCSI_DEBUG_OPT_NOISE 1 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2 #define SCSI_DEBUG_OPT_TIMEOUT 4 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16 #define SCSI_DEBUG_OPT_DIF_ERR 32 #define SCSI_DEBUG_OPT_DIX_ERR 64 /* When "every_nth" > 0 then modulo "every_nth" commands: * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set * - a 
RECOVERED_ERROR is simulated on successful read and write * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. * - a TRANSPORT_ERROR is simulated on successful read and write * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. * * When "every_nth" < 0 then after "- every_nth" commands: * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set * - a RECOVERED_ERROR is simulated on successful read and write * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set. * - a TRANSPORT_ERROR is simulated on successful read and write * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set. * This will continue until some other action occurs (e.g. the user * writing a new value (other than -1 or 1) to every_nth via sysfs). */ /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this * sector on read commands: */ #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1) * or "peripheral device" addressing (value 0) */ #define SAM2_LUN_ADDRESS_METHOD 0 #define SAM2_WLUN_REPORT_LUNS 0xc101 /* Can queue up to this number of commands. Typically commands that * that have a non-zero delay are queued. 
*/ #define SCSI_DEBUG_CANQUEUE 255 static int scsi_debug_add_host = DEF_NUM_HOST; static int scsi_debug_ato = DEF_ATO; static int scsi_debug_delay = DEF_DELAY; static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB; static int scsi_debug_dif = DEF_DIF; static int scsi_debug_dix = DEF_DIX; static int scsi_debug_dsense = DEF_D_SENSE; static int scsi_debug_every_nth = DEF_EVERY_NTH; static int scsi_debug_fake_rw = DEF_FAKE_RW; static int scsi_debug_guard = DEF_GUARD; static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; static int scsi_debug_max_luns = DEF_MAX_LUNS; static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; static int scsi_debug_no_uld = 0; static int scsi_debug_num_parts = DEF_NUM_PARTS; static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */ static int scsi_debug_opt_blks = DEF_OPT_BLKS; static int scsi_debug_opts = DEF_OPTS; static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */ static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; static int scsi_debug_sector_size = DEF_SECTOR_SIZE; static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; static unsigned int scsi_debug_lbpu = DEF_LBPU; static unsigned int scsi_debug_lbpws = DEF_LBPWS; static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; static int scsi_debug_cmnd_count = 0; #define DEV_READONLY(TGT) (0) #define DEV_REMOVEABLE(TGT) (0) static unsigned int sdebug_store_sectors; static sector_t sdebug_capacity; /* in sectors */ /* old BIOS stuff, kernel may get rid of 
them but some mode sense pages may still need them */ static int sdebug_heads; /* heads per disk */ static int sdebug_cylinders_per; /* cylinders per surface */ static int sdebug_sectors_per; /* sectors per cylinder */ #define SDEBUG_MAX_PARTS 4 #define SDEBUG_SENSE_LEN 32 #define SCSI_DEBUG_MAX_CMD_LEN 32 static unsigned int scsi_debug_lbp(void) { return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10; } struct sdebug_dev_info { struct list_head dev_list; unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */ unsigned int channel; unsigned int target; unsigned int lun; struct sdebug_host_info *sdbg_host; unsigned int wlun; char reset; char stopped; char used; }; struct sdebug_host_info { struct list_head host_list; struct Scsi_Host *shost; struct device dev; struct list_head dev_info_list; }; #define to_sdebug_host(d) \ container_of(d, struct sdebug_host_info, dev) static LIST_HEAD(sdebug_host_list); static DEFINE_SPINLOCK(sdebug_host_list_lock); typedef void (* done_funct_t) (struct scsi_cmnd *); struct sdebug_queued_cmd { int in_use; struct timer_list cmnd_timer; done_funct_t done_funct; struct scsi_cmnd * a_cmnd; int scsi_result; }; static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE]; static unsigned char * fake_storep; /* ramdisk storage */ static unsigned char *dif_storep; /* protection info */ static void *map_storep; /* provisioning map */ static unsigned long map_size; static int num_aborts = 0; static int num_dev_resets = 0; static int num_bus_resets = 0; static int num_host_resets = 0; static int dix_writes; static int dix_reads; static int dif_errors; static DEFINE_SPINLOCK(queued_arr_lock); static DEFINE_RWLOCK(atomic_rw); static char sdebug_proc_name[] = "scsi_debug"; static struct bus_type pseudo_lld_bus; static inline sector_t dif_offset(sector_t sector) { return sector << 3; } static struct device_driver sdebug_driverfs_driver = { .name = sdebug_proc_name, .bus = &pseudo_lld_bus, }; static const int check_condition_result 
= (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; static const int illegal_condition_result = (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0, 0, 0, 0x2, 0x4b}; static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0, 0, 0, 0x0, 0x0}; static int sdebug_add_adapter(void); static void sdebug_remove_adapter(void); static void sdebug_max_tgts_luns(void) { struct sdebug_host_info *sdbg_host; struct Scsi_Host *hpnt; spin_lock(&sdebug_host_list_lock); list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) { hpnt = sdbg_host->shost; if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id)) hpnt->max_id = scsi_debug_num_tgts + 1; else hpnt->max_id = scsi_debug_num_tgts; /* scsi_debug_max_luns; */ hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; } spin_unlock(&sdebug_host_list_lock); } static void mk_sense_buffer(struct sdebug_dev_info *devip, int key, int asc, int asq) { unsigned char *sbuff; sbuff = devip->sense_buff; memset(sbuff, 0, SDEBUG_SENSE_LEN); scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq); if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: " "[0x%x,0x%x,0x%x]\n", key, asc, asq); } static void get_data_transfer_info(unsigned char *cmd, unsigned long long *lba, unsigned int *num, u32 *ei_lba) { *ei_lba = 0; switch (*cmd) { case VARIABLE_LENGTH_CMD: *lba = (u64)cmd[19] | (u64)cmd[18] << 8 | (u64)cmd[17] << 16 | (u64)cmd[16] << 24 | (u64)cmd[15] << 32 | (u64)cmd[14] << 40 | (u64)cmd[13] << 48 | (u64)cmd[12] << 56; *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 | (u32)cmd[21] << 16 | (u32)cmd[20] << 24; *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 | (u32)cmd[28] << 24; break; case WRITE_SAME_16: case WRITE_16: case READ_16: *lba = (u64)cmd[9] | (u64)cmd[8] << 8 | (u64)cmd[7] << 16 | (u64)cmd[6] << 24 | (u64)cmd[5] << 32 | (u64)cmd[4] << 40 | (u64)cmd[3] << 48 | (u64)cmd[2] << 56; *num = 
		       (u32)cmd[13] | (u32)cmd[12] << 8 |
		       (u32)cmd[11] << 16 | (u32)cmd[10] << 24;
		break;
	case WRITE_12:
	case READ_12:
		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
			(u32)cmd[2] << 24;

		*num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
			(u32)cmd[6] << 24;
		break;
	case WRITE_SAME:
	case WRITE_10:
	case READ_10:
	case XDWRITEREAD_10:
		*lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
			(u32)cmd[2] << 24;

		*num = (u32)cmd[8] | (u32)cmd[7] << 8;
		break;
	case WRITE_6:
	case READ_6:
		*lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
			(u32)(cmd[1] & 0x1f) << 16;
		*num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		break;
	default:
		break;
	}
}

/* Minimal ioctl handler: optionally log the request, reject all. */
static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
		printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
	}
	return -EINVAL;
	/* return -ENOTTY; // correct return but upsets fdisk */
}

/* Report (and clear) a pending unit attention; when reset_only is 0
 * also report not-ready if the unit has been stopped.  Returns 0 when
 * ready, else check_condition_result with the sense buffer set up. */
static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
			   struct sdebug_dev_info * devip)
{
	if (devip->reset) {
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Reporting Unit "
			       "attention: power on reset\n");
		devip->reset = 0;
		mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
		return check_condition_result;
	}
	if ((0 == reset_only) && devip->stopped) {
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Reporting Not "
			       "ready: initializing command required\n");
		mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
				0x2);
		return check_condition_result;
	}
	return 0;
}

/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid .
 */
/* Copy up to arr_len bytes of response data into the command's
 * data-in scatter-gather list and maintain the residual count. */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scp) ||
	      scp->sc_data_direction == DMA_FROM_DEVICE))
		return (DID_ERROR << 16);

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
				      arr, arr_len);
	if (sdb->resid)
		sdb->resid -= act_len;
	else
		sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}

/* Returns number of bytes fetched into 'arr' or -1 if error. */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{
	if (!scsi_bufflen(scp))
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return scsi_sg_copy_to_buffer(scp, arr, arr_len);
}

/* Fixed width INQUIRY identification fields (8/16/4 bytes).
 * NOTE(review): these must be space padded to their full field width
 * since the memcpy calls below copy 8 and 16 bytes — confirm the
 * padding was not collapsed by reformatting. */
static const char * inq_vendor_id = "Linux ";
static const char * inq_product_id = "scsi_debug ";
static const char * inq_product_rev = "0004";

/* Build the Device Identification VPD page (0x83): a T10 vendor id
 * designator followed by several NAA-5 and SCSI name string
 * designators (all faked).  Returns the number of bytes written. */
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
			   int target_dev_id, int dev_id_num,
			   const char * dev_id_str, int dev_id_str_len)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], inq_vendor_id, 8);
	memcpy(&arr[12], inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;
	num += 4;
	if (dev_id_num >= 0) {
		/* NAA-5, Logical unit identifier (binary) */
		arr[num++] = 0x1;	/* binary (not necessarily sas) */
		arr[num++] = 0x3;	/* PIV=0, lu, naa */
		arr[num++] = 0x0;
		arr[num++] = 0x8;
		arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
		arr[num++] = 0x33;
		arr[num++] = 0x33;
		arr[num++] = 0x30;
		arr[num++] = (dev_id_num >> 24);
		arr[num++] = (dev_id_num >> 16) & 0xff;
		arr[num++] = (dev_id_num >> 8) & 0xff;
		arr[num++] = dev_id_num & 0xff;
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-5, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
	arr[num++] = 0x22;
	arr[num++] = 0x22;
	arr[num++] = 0x20;
	arr[num++] = (port_a >> 24);
	arr[num++] = (port_a >> 16) & 0xff;
	arr[num++] = (port_a >> 8) & 0xff;
	arr[num++] = port_a & 0xff;
	/* NAA-5, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	arr[num++] = (port_group_id >> 8) & 0xff;
	arr[num++] = port_group_id & 0xff;
	/* NAA-5, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	arr[num++] = 0x52;	/* naa-5, company id=0x222222 (fake) */
	arr[num++] = 0x22;
	arr[num++] = 0x22;
	arr[num++] = 0x20;
	arr[num++] = (target_dev_id >> 24);
	arr[num++] = (target_dev_id >> 16) & 0xff;
	arr[num++] = (target_dev_id >> 8) & 0xff;
	arr[num++] = target_dev_id & 0xff;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;
	memcpy(arr + num, "naa.52222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);
	num += 4;
	return num;
}

/* Canned Software Interface Identification VPD page payload. */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};

/* Software interface identification VPD page (0x84) */
static int inquiry_evpd_84(unsigned char * arr)
{
	memcpy(arr, vpd84_data, sizeof(vpd84_data));
	return sizeof(vpd84_data);
}

/* Management network addresses VPD page (0x85) */
static int inquiry_evpd_85(unsigned
			   char * arr)
{
	int num = 0;
	const char * na1 = "https://www.kernel.org/config";
	const char * na2 = "http://www.kernel.org/log";
	int plen, olen;

	arr[num++] = 0x1;	/* lu, storage config */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na1);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* round up to 4 byte multiple */
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na1, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	arr[num++] = 0x4;	/* lu, logging */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	olen = strlen(na2);
	plen = olen + 1;
	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;
	arr[num++] = plen;	/* length, null terminated, padded */
	memcpy(arr + num, na2, olen);
	memset(arr + num + olen, 0, plen - olen);
	num += plen;

	return num;
}

/* SCSI ports VPD page */
static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
{
	int num = 0;
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;	/* reserved */
	arr[num++] = 0x0;
	arr[num++] = 0x1;	/* relative port 1 (primary) */
	memset(arr + num, 0, 6);
	num += 6;
	arr[num++] = 0x0;
	arr[num++] = 12;	/* length tp descriptor */
	/* naa-5 target port identifier (A) */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* PIV=1, target port, NAA */
arr[num++] = 0x0; /* reserved */ arr[num++] = 0x8; /* length */ arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */ arr[num++] = 0x22; arr[num++] = 0x22; arr[num++] = 0x20; arr[num++] = (port_b >> 24); arr[num++] = (port_b >> 16) & 0xff; arr[num++] = (port_b >> 8) & 0xff; arr[num++] = port_b & 0xff; return num; } static unsigned char vpd89_data[] = { /* from 4th byte */ 0,0,0,0, 'l','i','n','u','x',' ',' ',' ', 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ', '1','2','3','4', 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 0xec,0,0,0, 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0, 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20, 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33, 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31, 0x53,0x41, 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, 0x20,0x20, 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, 0x10,0x80, 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0, 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0, 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0, 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40, 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0, 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42, 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8, 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe, 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51, }; static int inquiry_evpd_89(unsigned char * arr) { memcpy(arr, vpd89_data, sizeof(vpd89_data)); return sizeof(vpd89_data); } /* Block limits VPD page (SBC-3) */ static unsigned char vpdb0_data[] = { /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, }; static int inquiry_evpd_b0(unsigned char * arr) { unsigned int gran; memcpy(arr, vpdb0_data, sizeof(vpdb0_data)); /* Optimal transfer length granularity */ gran = 1 << scsi_debug_physblk_exp; arr[2] = (gran >> 8) & 0xff; arr[3] = gran & 0xff; /* Maximum Transfer Length */ if (sdebug_store_sectors > 0x400) { arr[4] = (sdebug_store_sectors >> 24) & 0xff; arr[5] = (sdebug_store_sectors >> 16) & 0xff; arr[6] = (sdebug_store_sectors >> 8) & 0xff; arr[7] = sdebug_store_sectors & 0xff; } /* Optimal Transfer Length */ put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); if (scsi_debug_lbpu) { /* Maximum Unmap LBA Count */ put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]); /* Maximum Unmap Block Descriptor Count */ put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]); } /* Unmap Granularity Alignment */ if (scsi_debug_unmap_alignment) { put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]); arr[28] |= 0x80; /* UGAVALID */ } /* Optimal Unmap Granularity */ put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); /* Maximum WRITE SAME Length */ put_unaligned_be64(scsi_debug_write_same_length, &arr[32]); return 0x3c; /* Mandatory page length for Logical Block Provisioning */ return sizeof(vpdb0_data); } /* Block device characteristics VPD page (SBC-3) */ static int inquiry_evpd_b1(unsigned char *arr) { memset(arr, 0, 0x3c); arr[0] = 0; arr[1] = 1; /* non rotating medium (e.g. 
solid state) */ arr[2] = 0; arr[3] = 5; /* less than 1.8" */ return 0x3c; } /* Thin provisioning VPD page (SBC-3) */ static int inquiry_evpd_b2(unsigned char *arr) { memset(arr, 0, 0x8); arr[0] = 0; /* threshold exponent */ if (scsi_debug_lbpu) arr[1] = 1 << 7; if (scsi_debug_lbpws) arr[1] |= 1 << 6; if (scsi_debug_lbpws10) arr[1] |= 1 << 5; return 0x8; } #define SDEBUG_LONG_INQ_SZ 96 #define SDEBUG_MAX_INQ_ARR_SZ 584 static int resp_inquiry(struct scsi_cmnd * scp, int target, struct sdebug_dev_info * devip) { unsigned char pq_pdt; unsigned char * arr; unsigned char *cmd = (unsigned char *)scp->cmnd; int alloc_len, n, ret; alloc_len = (cmd[3] << 8) + cmd[4]; arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC); if (! arr) return DID_REQUEUE << 16; if (devip->wlun) pq_pdt = 0x1e; /* present, wlun */ else if (scsi_debug_no_lun_0 && (0 == devip->lun)) pq_pdt = 0x7f; /* not present, no device type */ else pq_pdt = (scsi_debug_ptype & 0x1f); arr[0] = pq_pdt; if (0x2 & cmd[1]) { /* CMDDT bit set */ mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); kfree(arr); return check_condition_result; } else if (0x1 & cmd[1]) { /* EVPD bit set */ int lu_id_num, port_group_id, target_dev_id, len; char lu_id_str[6]; int host_no = devip->sdbg_host->shost->host_no; port_group_id = (((host_no + 1) & 0x7f) << 8) + (devip->channel & 0x7f); if (0 == scsi_debug_vpd_use_hostno) host_no = 0; lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) + (devip->target * 1000) + devip->lun); target_dev_id = ((host_no + 1) * 2000) + (devip->target * 1000) - 3; len = scnprintf(lu_id_str, 6, "%d", lu_id_num); if (0 == cmd[2]) { /* supported vital product data pages */ arr[1] = cmd[2]; /*sanity */ n = 4; arr[n++] = 0x0; /* this page */ arr[n++] = 0x80; /* unit serial number */ arr[n++] = 0x83; /* device identification */ arr[n++] = 0x84; /* software interface ident. 
*/ arr[n++] = 0x85; /* management network addresses */ arr[n++] = 0x86; /* extended inquiry */ arr[n++] = 0x87; /* mode page policy */ arr[n++] = 0x88; /* SCSI ports */ arr[n++] = 0x89; /* ATA information */ arr[n++] = 0xb0; /* Block limits (SBC) */ arr[n++] = 0xb1; /* Block characteristics (SBC) */ if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */ arr[n++] = 0xb2; arr[3] = n - 4; /* number of supported VPD pages */ } else if (0x80 == cmd[2]) { /* unit serial number */ arr[1] = cmd[2]; /*sanity */ arr[3] = len; memcpy(&arr[4], lu_id_str, len); } else if (0x83 == cmd[2]) { /* device identification */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_83(&arr[4], port_group_id, target_dev_id, lu_id_num, lu_id_str, len); } else if (0x84 == cmd[2]) { /* Software interface ident. */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_84(&arr[4]); } else if (0x85 == cmd[2]) { /* Management network addresses */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_85(&arr[4]); } else if (0x86 == cmd[2]) { /* extended inquiry */ arr[1] = cmd[2]; /*sanity */ arr[3] = 0x3c; /* number of following entries */ if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) arr[4] = 0x4; /* SPT: GRD_CHK:1 */ else if (scsi_debug_dif) arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */ else arr[4] = 0x0; /* no protection stuff */ arr[5] = 0x7; /* head of q, ordered + simple q's */ } else if (0x87 == cmd[2]) { /* mode page policy */ arr[1] = cmd[2]; /*sanity */ arr[3] = 0x8; /* number of following entries */ arr[4] = 0x2; /* disconnect-reconnect mp */ arr[6] = 0x80; /* mlus, shared */ arr[8] = 0x18; /* protocol specific lu */ arr[10] = 0x82; /* mlus, per initiator port */ } else if (0x88 == cmd[2]) { /* SCSI Ports */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_88(&arr[4], target_dev_id); } else if (0x89 == cmd[2]) { /* ATA information */ arr[1] = cmd[2]; /*sanity */ n = inquiry_evpd_89(&arr[4]); arr[2] = (n >> 8); arr[3] = (n & 0xff); } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */ arr[1] = 
cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b0(&arr[4]); } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b1(&arr[4]); } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b2(&arr[4]); } else { /* Illegal request, invalid field in cdb */ mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); kfree(arr); return check_condition_result; } len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); ret = fill_from_dev_buffer(scp, arr, min(len, SDEBUG_MAX_INQ_ARR_SZ)); kfree(arr); return ret; } /* drops through here for a standard inquiry */ arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */ arr[2] = scsi_debug_scsi_level; arr[3] = 2; /* response_data_format==2 */ arr[4] = SDEBUG_LONG_INQ_SZ - 5; arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */ if (0 == scsi_debug_vpd_use_hostno) arr[5] = 0x10; /* claim: implicit TGPS */ arr[6] = 0x10; /* claim: MultiP */ /* arr[6] |= 0x40; ... 
claim: EncServ (enclosure services) */ arr[7] = 0xa; /* claim: LINKED + CMDQUE */ memcpy(&arr[8], inq_vendor_id, 8); memcpy(&arr[16], inq_product_id, 16); memcpy(&arr[32], inq_product_rev, 4); /* version descriptors (2 bytes each) follow */ arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */ arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */ n = 62; if (scsi_debug_ptype == 0) { arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */ } else if (scsi_debug_ptype == 1) { arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */ } arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */ ret = fill_from_dev_buffer(scp, arr, min(alloc_len, SDEBUG_LONG_INQ_SZ)); kfree(arr); return ret; } static int resp_requests(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned char * sbuff; unsigned char *cmd = (unsigned char *)scp->cmnd; unsigned char arr[SDEBUG_SENSE_LEN]; int want_dsense; int len = 18; memset(arr, 0, sizeof(arr)); if (devip->reset == 1) mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0); want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense; sbuff = devip->sense_buff; if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) { if (want_dsense) { arr[0] = 0x72; arr[1] = 0x0; /* NO_SENSE in sense_key */ arr[2] = THRESHOLD_EXCEEDED; arr[3] = 0xff; /* TEST set and MRIE==6 */ } else { arr[0] = 0x70; arr[2] = 0x0; /* NO_SENSE in sense_key */ arr[7] = 0xa; /* 18 byte sense buffer */ arr[12] = THRESHOLD_EXCEEDED; arr[13] = 0xff; /* TEST set and MRIE==6 */ } } else { memcpy(arr, sbuff, SDEBUG_SENSE_LEN); if ((cmd[1] & 1) && (! 
scsi_debug_dsense)) { /* DESC bit set and sense_buff in fixed format */ memset(arr, 0, sizeof(arr)); arr[0] = 0x72; arr[1] = sbuff[2]; /* sense key */ arr[2] = sbuff[12]; /* asc */ arr[3] = sbuff[13]; /* ascq */ len = 8; } } mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0); return fill_from_dev_buffer(scp, arr, len); } static int resp_start_stop(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned char *cmd = (unsigned char *)scp->cmnd; int power_cond, errsts, start; if ((errsts = check_readiness(scp, 1, devip))) return errsts; power_cond = (cmd[4] & 0xf0) >> 4; if (power_cond) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } start = cmd[4] & 1; if (start == devip->stopped) devip->stopped = !start; return 0; } static sector_t get_sdebug_capacity(void) { if (scsi_debug_virtual_gb > 0) return (sector_t)scsi_debug_virtual_gb * (1073741824 / scsi_debug_sector_size); else return sdebug_store_sectors; } #define SDEBUG_READCAP_ARR_SZ 8 static int resp_readcap(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned char arr[SDEBUG_READCAP_ARR_SZ]; unsigned int capac; int errsts; if ((errsts = check_readiness(scp, 1, devip))) return errsts; /* following just in case virtual_gb changed */ sdebug_capacity = get_sdebug_capacity(); memset(arr, 0, SDEBUG_READCAP_ARR_SZ); if (sdebug_capacity < 0xffffffff) { capac = (unsigned int)sdebug_capacity - 1; arr[0] = (capac >> 24); arr[1] = (capac >> 16) & 0xff; arr[2] = (capac >> 8) & 0xff; arr[3] = capac & 0xff; } else { arr[0] = 0xff; arr[1] = 0xff; arr[2] = 0xff; arr[3] = 0xff; } arr[6] = (scsi_debug_sector_size >> 8) & 0xff; arr[7] = scsi_debug_sector_size & 0xff; return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ); } #define SDEBUG_READCAP16_ARR_SZ 32 static int resp_readcap16(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned char *cmd = (unsigned char *)scp->cmnd; unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; unsigned long long 
capac; int errsts, k, alloc_len; if ((errsts = check_readiness(scp, 1, devip))) return errsts; alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8) + cmd[13]); /* following just in case virtual_gb changed */ sdebug_capacity = get_sdebug_capacity(); memset(arr, 0, SDEBUG_READCAP16_ARR_SZ); capac = sdebug_capacity - 1; for (k = 0; k < 8; ++k, capac >>= 8) arr[7 - k] = capac & 0xff; arr[8] = (scsi_debug_sector_size >> 24) & 0xff; arr[9] = (scsi_debug_sector_size >> 16) & 0xff; arr[10] = (scsi_debug_sector_size >> 8) & 0xff; arr[11] = scsi_debug_sector_size & 0xff; arr[13] = scsi_debug_physblk_exp & 0xf; arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; if (scsi_debug_lbp()) arr[14] |= 0x80; /* LBPME */ arr[15] = scsi_debug_lowest_aligned & 0xff; if (scsi_debug_dif) { arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */ arr[12] |= 1; /* PROT_EN */ } return fill_from_dev_buffer(scp, arr, min(alloc_len, SDEBUG_READCAP16_ARR_SZ)); } #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412 static int resp_report_tgtpgs(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned char *cmd = (unsigned char *)scp->cmnd; unsigned char * arr; int host_no = devip->sdbg_host->shost->host_no; int n, ret, alen, rlen; int port_group_a, port_group_b, port_a, port_b; alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8) + cmd[9]); arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC); if (! arr) return DID_REQUEUE << 16; /* * EVPD page 0x88 states we have two ports, one * real and a fake port with no device connected. * So we create two port groups with one port each * and set the group with port B to unavailable. */ port_a = 0x1; /* relative port A */ port_b = 0x2; /* relative port B */ port_group_a = (((host_no + 1) & 0x7f) << 8) + (devip->channel & 0x7f); port_group_b = (((host_no + 1) & 0x7f) << 8) + (devip->channel & 0x7f) + 0x80; /* * The asymmetric access state is cycled according to the host_id. 
*/ n = 4; if (0 == scsi_debug_vpd_use_hostno) { arr[n++] = host_no % 3; /* Asymm access state */ arr[n++] = 0x0F; /* claim: all states are supported */ } else { arr[n++] = 0x0; /* Active/Optimized path */ arr[n++] = 0x01; /* claim: only support active/optimized paths */ } arr[n++] = (port_group_a >> 8) & 0xff; arr[n++] = port_group_a & 0xff; arr[n++] = 0; /* Reserved */ arr[n++] = 0; /* Status code */ arr[n++] = 0; /* Vendor unique */ arr[n++] = 0x1; /* One port per group */ arr[n++] = 0; /* Reserved */ arr[n++] = 0; /* Reserved */ arr[n++] = (port_a >> 8) & 0xff; arr[n++] = port_a & 0xff; arr[n++] = 3; /* Port unavailable */ arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */ arr[n++] = (port_group_b >> 8) & 0xff; arr[n++] = port_group_b & 0xff; arr[n++] = 0; /* Reserved */ arr[n++] = 0; /* Status code */ arr[n++] = 0; /* Vendor unique */ arr[n++] = 0x1; /* One port per group */ arr[n++] = 0; /* Reserved */ arr[n++] = 0; /* Reserved */ arr[n++] = (port_b >> 8) & 0xff; arr[n++] = port_b & 0xff; rlen = n - 4; arr[0] = (rlen >> 24) & 0xff; arr[1] = (rlen >> 16) & 0xff; arr[2] = (rlen >> 8) & 0xff; arr[3] = rlen & 0xff; /* * Return the smallest value of either * - The allocated length * - The constructed command length * - The maximum array size */ rlen = min(alen,n); ret = fill_from_dev_buffer(scp, arr, min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ)); kfree(arr); return ret; } /* <<Following mode page info copied from ST318451LW>> */ static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target) { /* Read-Write Error Recovery page for mode_sense */ unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0, 5, 0, 0xff, 0xff}; memcpy(p, err_recov_pg, sizeof(err_recov_pg)); if (1 == pcontrol) memset(p + 2, 0, sizeof(err_recov_pg) - 2); return sizeof(err_recov_pg); } static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target) { /* Disconnect-Reconnect page for mode_sense */ unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 
				       0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}

static int resp_format_pg(unsigned char * p, int pcontrol, int target)
{	/* Format device page for mode_sense */
	unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0, 0, 0, 0,
				     0, 0, 0, 0, 0x40, 0, 0, 0};

	memcpy(p, format_pg, sizeof(format_pg));
	/* sectors/track and logical block size from the fake geometry */
	p[10] = (sdebug_sectors_per >> 8) & 0xff;
	p[11] = sdebug_sectors_per & 0xff;
	p[12] = (scsi_debug_sector_size >> 8) & 0xff;
	p[13] = scsi_debug_sector_size & 0xff;
	if (DEV_REMOVEABLE(target))
		p[20] |= 0x20; /* should agree with INQUIRY */
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(format_pg) - 2);	/* changeable: none */
	return sizeof(format_pg);
}

static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
{ 	/* Caching page for mode_sense */
	unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
		0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};

	memcpy(p, caching_pg, sizeof(caching_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(caching_pg) - 2);	/* changeable: none */
	return sizeof(caching_pg);
}

static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ 	/* Control mode page for mode_sense */
	unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
				        0, 0, 0, 0};
	unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				     0, 0, 0x2, 0x4b};

	/* keep the live page's D_SENSE and ATO bits in sync with the
	 * module parameters before reporting it */
	if (scsi_debug_dsense)
		ctrl_m_pg[2] |= 0x4;
	else
		ctrl_m_pg[2] &= ~0x4;

	if (scsi_debug_ato)
		ctrl_m_pg[5] |= 0x80; /* ATO=1 */

	memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));	/* defaults */
	return sizeof(ctrl_m_pg);
}

static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
{	/* Informational Exceptions control mode page for mode_sense */
	unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
				       0, 0, 0x0, 0x0};
	unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08,
				      0, 0, 0, 0, 0, 0, 0, 0x0, 0x0};

	memcpy(p, iec_m_pg, sizeof(iec_m_pg));
	if (1 == pcontrol)
		memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
	else if (2 == pcontrol)
		memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));	/* defaults */
	return sizeof(iec_m_pg);
}

static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char sas_sf_m_pg[] = {0x19, 0x6, 0x6, 0x0, 0x7, 0xd0,
		0x0, 0x0};

	memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);	/* changeable: none */
	return sizeof(sas_sf_m_pg);
}

static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
		    0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	/* patch the (fake) SAS addresses of both phys into place */
	p[20] = (port_a >> 24);
	p[21] = (port_a >> 16) & 0xff;
	p[22] = (port_a >> 8) & 0xff;
	p[23] = port_a & 0xff;
	p[48 + 20] = (port_b >> 24);
	p[48 + 21] = (port_b >> 16) & 0xff;
	p[48 + 22] = (port_b >> 8) & 0xff;
	p[48 + 23] = port_b & 0xff;
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);	/* changeable: none */
	return sizeof(sas_pcd_m_pg);
}

static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
	if (1 == pcontrol)
		memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);	/* changeable: none */
	return
sizeof(sas_sha_m_pg); } #define SDEBUG_MAX_MSENSE_SZ 256 static int resp_mode_sense(struct scsi_cmnd * scp, int target, struct sdebug_dev_info * devip) { unsigned char dbd, llbaa; int pcontrol, pcode, subpcode, bd_len; unsigned char dev_spec; int k, alloc_len, msense_6, offset, len, errsts, target_dev_id; unsigned char * ap; unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; unsigned char *cmd = (unsigned char *)scp->cmnd; if ((errsts = check_readiness(scp, 1, devip))) return errsts; dbd = !!(cmd[1] & 0x8); pcontrol = (cmd[2] & 0xc0) >> 6; pcode = cmd[2] & 0x3f; subpcode = cmd[3]; msense_6 = (MODE_SENSE == cmd[0]); llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10); if ((0 == scsi_debug_ptype) && (0 == dbd)) bd_len = llbaa ? 16 : 8; else bd_len = 0; alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]); memset(arr, 0, SDEBUG_MAX_MSENSE_SZ); if (0x3 == pcontrol) { /* Saving values not supported */ mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0); return check_condition_result; } target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + (devip->target * 1000) - 3; /* set DPOFUA bit for disks */ if (0 == scsi_debug_ptype) dev_spec = (DEV_READONLY(target) ? 
0x80 : 0x0) | 0x10; else dev_spec = 0x0; if (msense_6) { arr[2] = dev_spec; arr[3] = bd_len; offset = 4; } else { arr[3] = dev_spec; if (16 == bd_len) arr[4] = 0x1; /* set LONGLBA bit */ arr[7] = bd_len; /* assume 255 or less */ offset = 8; } ap = arr + offset; if ((bd_len > 0) && (!sdebug_capacity)) sdebug_capacity = get_sdebug_capacity(); if (8 == bd_len) { if (sdebug_capacity > 0xfffffffe) { ap[0] = 0xff; ap[1] = 0xff; ap[2] = 0xff; ap[3] = 0xff; } else { ap[0] = (sdebug_capacity >> 24) & 0xff; ap[1] = (sdebug_capacity >> 16) & 0xff; ap[2] = (sdebug_capacity >> 8) & 0xff; ap[3] = sdebug_capacity & 0xff; } ap[6] = (scsi_debug_sector_size >> 8) & 0xff; ap[7] = scsi_debug_sector_size & 0xff; offset += bd_len; ap = arr + offset; } else if (16 == bd_len) { unsigned long long capac = sdebug_capacity; for (k = 0; k < 8; ++k, capac >>= 8) ap[7 - k] = capac & 0xff; ap[12] = (scsi_debug_sector_size >> 24) & 0xff; ap[13] = (scsi_debug_sector_size >> 16) & 0xff; ap[14] = (scsi_debug_sector_size >> 8) & 0xff; ap[15] = scsi_debug_sector_size & 0xff; offset += bd_len; ap = arr + offset; } if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) { /* TODO: Control Extension page */ mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } switch (pcode) { case 0x1: /* Read-Write error recovery page, direct access */ len = resp_err_recov_pg(ap, pcontrol, target); offset += len; break; case 0x2: /* Disconnect-Reconnect page, all devices */ len = resp_disconnect_pg(ap, pcontrol, target); offset += len; break; case 0x3: /* Format device page, direct access */ len = resp_format_pg(ap, pcontrol, target); offset += len; break; case 0x8: /* Caching page, direct access */ len = resp_caching_pg(ap, pcontrol, target); offset += len; break; case 0xa: /* Control Mode page, all devices */ len = resp_ctrl_m_pg(ap, pcontrol, target); offset += len; break; case 0x19: /* if spc==1 then sas phy, control+discover */ if ((subpcode > 0x2) && (subpcode < 
0xff)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } len = 0; if ((0x0 == subpcode) || (0xff == subpcode)) len += resp_sas_sf_m_pg(ap + len, pcontrol, target); if ((0x1 == subpcode) || (0xff == subpcode)) len += resp_sas_pcd_m_spg(ap + len, pcontrol, target, target_dev_id); if ((0x2 == subpcode) || (0xff == subpcode)) len += resp_sas_sha_m_spg(ap + len, pcontrol); offset += len; break; case 0x1c: /* Informational Exceptions Mode page, all devices */ len = resp_iec_m_pg(ap, pcontrol, target); offset += len; break; case 0x3f: /* Read all Mode pages */ if ((0 == subpcode) || (0xff == subpcode)) { len = resp_err_recov_pg(ap, pcontrol, target); len += resp_disconnect_pg(ap + len, pcontrol, target); len += resp_format_pg(ap + len, pcontrol, target); len += resp_caching_pg(ap + len, pcontrol, target); len += resp_ctrl_m_pg(ap + len, pcontrol, target); len += resp_sas_sf_m_pg(ap + len, pcontrol, target); if (0xff == subpcode) { len += resp_sas_pcd_m_spg(ap + len, pcontrol, target, target_dev_id); len += resp_sas_sha_m_spg(ap + len, pcontrol); } len += resp_iec_m_pg(ap + len, pcontrol, target); } else { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } offset += len; break; default: mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } if (msense_6) arr[0] = offset - 1; else { arr[0] = ((offset - 2) >> 8) & 0xff; arr[1] = (offset - 2) & 0xff; } return fill_from_dev_buffer(scp, arr, min(alloc_len, offset)); } #define SDEBUG_MAX_MSELECT_SZ 512 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6, struct sdebug_dev_info * devip) { int pf, sp, ps, md_len, bd_len, off, spf, pg_len; int param_len, res, errsts, mpage; unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; unsigned char *cmd = (unsigned char *)scp->cmnd; if ((errsts = check_readiness(scp, 1, devip))) return errsts; memset(arr, 0, sizeof(arr)); pf = cmd[1] & 0x10; sp = 
cmd[1] & 0x1; param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]); if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } res = fetch_to_dev_buffer(scp, arr, param_len); if (-1 == res) return (DID_ERROR << 16); else if ((res < param_len) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, " " IO sent=%d bytes\n", param_len, res); md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2); bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]); if (md_len > 2) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_PARAM_LIST, 0); return check_condition_result; } off = bd_len + (mselect6 ? 4 : 8); mpage = arr[off] & 0x3f; ps = !!(arr[off] & 0x80); if (ps) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_PARAM_LIST, 0); return check_condition_result; } spf = !!(arr[off] & 0x40); pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) : (arr[off + 1] + 2); if ((pg_len + off) > param_len) { mk_sense_buffer(devip, ILLEGAL_REQUEST, PARAMETER_LIST_LENGTH_ERR, 0); return check_condition_result; } switch (mpage) { case 0xa: /* Control Mode page */ if (ctrl_m_pg[1] == arr[off + 1]) { memcpy(ctrl_m_pg + 2, arr + off + 2, sizeof(ctrl_m_pg) - 2); scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4); return 0; } break; case 0x1c: /* Informational Exceptions Mode page */ if (iec_m_pg[1] == arr[off + 1]) { memcpy(iec_m_pg + 2, arr + off + 2, sizeof(iec_m_pg) - 2); return 0; } break; default: break; } mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_PARAM_LIST, 0); return check_condition_result; } static int resp_temp_l_pg(unsigned char * arr) { unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38, 0x0, 0x1, 0x3, 0x2, 0x0, 65, }; memcpy(arr, temp_l_pg, sizeof(temp_l_pg)); return sizeof(temp_l_pg); } static int resp_ie_l_pg(unsigned char * arr) { unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 
38, }; memcpy(arr, ie_l_pg, sizeof(ie_l_pg)); if (iec_m_pg[2] & 0x4) { /* TEST bit set */ arr[4] = THRESHOLD_EXCEEDED; arr[5] = 0xff; } return sizeof(ie_l_pg); } #define SDEBUG_MAX_LSENSE_SZ 512 static int resp_log_sense(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n; unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; unsigned char *cmd = (unsigned char *)scp->cmnd; if ((errsts = check_readiness(scp, 1, devip))) return errsts; memset(arr, 0, sizeof(arr)); ppc = cmd[1] & 0x2; sp = cmd[1] & 0x1; if (ppc || sp) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } pcontrol = (cmd[2] & 0xc0) >> 6; pcode = cmd[2] & 0x3f; subpcode = cmd[3] & 0xff; alloc_len = (cmd[7] << 8) + cmd[8]; arr[0] = pcode; if (0 == subpcode) { switch (pcode) { case 0x0: /* Supported log pages log page */ n = 4; arr[n++] = 0x0; /* this page */ arr[n++] = 0xd; /* Temperature */ arr[n++] = 0x2f; /* Informational exceptions */ arr[3] = n - 4; break; case 0xd: /* Temperature log page */ arr[3] = resp_temp_l_pg(arr + 4); break; case 0x2f: /* Informational exceptions log page */ arr[3] = resp_ie_l_pg(arr + 4); break; default: mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } } else if (0xff == subpcode) { arr[0] |= 0x40; arr[1] = subpcode; switch (pcode) { case 0x0: /* Supported log pages and subpages log page */ n = 4; arr[n++] = 0x0; arr[n++] = 0x0; /* 0,0 page */ arr[n++] = 0x0; arr[n++] = 0xff; /* this page */ arr[n++] = 0xd; arr[n++] = 0x0; /* Temperature */ arr[n++] = 0x2f; arr[n++] = 0x0; /* Informational exceptions */ arr[3] = n - 4; break; case 0xd: /* Temperature subpages */ n = 4; arr[n++] = 0xd; arr[n++] = 0x0; /* Temperature */ arr[3] = n - 4; break; case 0x2f: /* Informational exceptions subpages */ n = 4; arr[n++] = 0x2f; arr[n++] = 0x0; /* Informational exceptions */ arr[3] = n - 4; break; default: mk_sense_buffer(devip, 
ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } } else { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len); return fill_from_dev_buffer(scp, arr, min(len, SDEBUG_MAX_INQ_ARR_SZ)); } static int check_device_access_params(struct sdebug_dev_info *devi, unsigned long long lba, unsigned int num) { if (lba + num > sdebug_capacity) { mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0); return check_condition_result; } /* transfer length excessive (tie in to block limits VPD page) */ if (num > sdebug_store_sectors) { mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } return 0; } static int do_device_access(struct scsi_cmnd *scmd, struct sdebug_dev_info *devi, unsigned long long lba, unsigned int num, int write) { int ret; unsigned int block, rest = 0; int (*func)(struct scsi_cmnd *, unsigned char *, int); func = write ? 
fetch_to_dev_buffer : fill_from_dev_buffer; block = do_div(lba, sdebug_store_sectors); if (block + num > sdebug_store_sectors) rest = block + num - sdebug_store_sectors; ret = func(scmd, fake_storep + (block * scsi_debug_sector_size), (num - rest) * scsi_debug_sector_size); if (!ret && rest) ret = func(scmd, fake_storep, rest * scsi_debug_sector_size); return ret; } static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, unsigned int sectors, u32 ei_lba) { unsigned int i, resid; struct scatterlist *psgl; struct sd_dif_tuple *sdt; sector_t sector; sector_t tmp_sec = start_sec; void *paddr; start_sec = do_div(tmp_sec, sdebug_store_sectors); sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec)); for (i = 0 ; i < sectors ; i++) { u16 csum; if (sdt[i].app_tag == 0xffff) continue; sector = start_sec + i; switch (scsi_debug_guard) { case 1: csum = ip_compute_csum(fake_storep + sector * scsi_debug_sector_size, scsi_debug_sector_size); break; case 0: csum = crc_t10dif(fake_storep + sector * scsi_debug_sector_size, scsi_debug_sector_size); csum = cpu_to_be16(csum); break; default: BUG(); } if (sdt[i].guard_tag != csum) { printk(KERN_ERR "%s: GUARD check failed on sector %lu" \ " rcvd 0x%04x, data 0x%04x\n", __func__, (unsigned long)sector, be16_to_cpu(sdt[i].guard_tag), be16_to_cpu(csum)); dif_errors++; return 0x01; } if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) { printk(KERN_ERR "%s: REF check failed on sector %lu\n", __func__, (unsigned long)sector); dif_errors++; return 0x03; } if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && be32_to_cpu(sdt[i].ref_tag) != ei_lba) { printk(KERN_ERR "%s: REF check failed on sector %lu\n", __func__, (unsigned long)sector); dif_errors++; return 0x03; } ei_lba++; } resid = sectors * 8; /* Bytes of protection data to copy into sgl */ sector = start_sec; scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { int len = min(psgl->length, resid); 
paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset; memcpy(paddr, dif_storep + dif_offset(sector), len); sector += len >> 3; if (sector >= sdebug_store_sectors) { /* Force wrap */ tmp_sec = sector; sector = do_div(tmp_sec, sdebug_store_sectors); } resid -= len; kunmap_atomic(paddr, KM_IRQ0); } dix_reads++; return 0; } static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, unsigned int num, struct sdebug_dev_info *devip, u32 ei_lba) { unsigned long iflags; int ret; ret = check_device_access_params(devip, lba, num); if (ret) return ret; if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && (lba <= OPT_MEDIUM_ERR_ADDR) && ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { /* claim unrecoverable read error */ mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); /* set info field and valid bit for fixed descriptor */ if (0x70 == (devip->sense_buff[0] & 0x7f)) { devip->sense_buff[0] |= 0x80; /* Valid bit */ ret = OPT_MEDIUM_ERR_ADDR; devip->sense_buff[3] = (ret >> 24) & 0xff; devip->sense_buff[4] = (ret >> 16) & 0xff; devip->sense_buff[5] = (ret >> 8) & 0xff; devip->sense_buff[6] = ret & 0xff; } return check_condition_result; } /* DIX + T10 DIF */ if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba); if (prot_ret) { mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret); return illegal_condition_result; } } read_lock_irqsave(&atomic_rw, iflags); ret = do_device_access(SCpnt, devip, lba, num, 0); read_unlock_irqrestore(&atomic_rw, iflags); return ret; } void dump_sector(unsigned char *buf, int len) { int i, j; printk(KERN_ERR ">>> Sector Dump <<<\n"); for (i = 0 ; i < len ; i += 16) { printk(KERN_ERR "%04d: ", i); for (j = 0 ; j < 16 ; j++) { unsigned char c = buf[i+j]; if (c >= 0x20 && c < 0x7e) printk(" %c ", buf[i+j]); else printk("%02x ", buf[i+j]); } printk("\n"); } } static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, unsigned int sectors, u32 ei_lba) { int i, j, ret; 
struct sd_dif_tuple *sdt; struct scatterlist *dsgl = scsi_sglist(SCpnt); struct scatterlist *psgl = scsi_prot_sglist(SCpnt); void *daddr, *paddr; sector_t tmp_sec = start_sec; sector_t sector; int ppage_offset; unsigned short csum; sector = do_div(tmp_sec, sdebug_store_sectors); BUG_ON(scsi_sg_count(SCpnt) == 0); BUG_ON(scsi_prot_sg_count(SCpnt) == 0); paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset; ppage_offset = 0; /* For each data page */ scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) { daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset; /* For each sector-sized chunk in data page */ for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) { /* If we're at the end of the current * protection page advance to the next one */ if (ppage_offset >= psgl->length) { kunmap_atomic(paddr, KM_IRQ1); psgl = sg_next(psgl); BUG_ON(psgl == NULL); paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset; ppage_offset = 0; } sdt = paddr + ppage_offset; switch (scsi_debug_guard) { case 1: csum = ip_compute_csum(daddr, scsi_debug_sector_size); break; case 0: csum = cpu_to_be16(crc_t10dif(daddr, scsi_debug_sector_size)); break; default: BUG(); ret = 0; goto out; } if (sdt->guard_tag != csum) { printk(KERN_ERR "%s: GUARD check failed on sector %lu " \ "rcvd 0x%04x, calculated 0x%04x\n", __func__, (unsigned long)sector, be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum)); ret = 0x01; dump_sector(daddr, scsi_debug_sector_size); goto out; } if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION && be32_to_cpu(sdt->ref_tag) != (start_sec & 0xffffffff)) { printk(KERN_ERR "%s: REF check failed on sector %lu\n", __func__, (unsigned long)sector); ret = 0x03; dump_sector(daddr, scsi_debug_sector_size); goto out; } if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && be32_to_cpu(sdt->ref_tag) != ei_lba) { printk(KERN_ERR "%s: REF check failed on sector %lu\n", __func__, (unsigned long)sector); ret = 0x03; dump_sector(daddr, scsi_debug_sector_size); goto out; } /* Would be 
great to copy this in bigger * chunks. However, for the sake of * correctness we need to verify each sector * before writing it to "stable" storage */ memcpy(dif_storep + dif_offset(sector), sdt, 8); sector++; if (sector == sdebug_store_sectors) sector = 0; /* Force wrap */ start_sec++; ei_lba++; daddr += scsi_debug_sector_size; ppage_offset += sizeof(struct sd_dif_tuple); } kunmap_atomic(daddr, KM_IRQ0); } kunmap_atomic(paddr, KM_IRQ1); dix_writes++; return 0; out: dif_errors++; kunmap_atomic(daddr, KM_IRQ0); kunmap_atomic(paddr, KM_IRQ1); return ret; } static unsigned int map_state(sector_t lba, unsigned int *num) { unsigned int granularity, alignment, mapped; sector_t block, next, end; granularity = scsi_debug_unmap_granularity; alignment = granularity - scsi_debug_unmap_alignment; block = lba + alignment; do_div(block, granularity); mapped = test_bit(block, map_storep); if (mapped) next = find_next_zero_bit(map_storep, map_size, block); else next = find_next_bit(map_storep, map_size, block); end = next * granularity - scsi_debug_unmap_alignment; *num = end - lba; return mapped; } static void map_region(sector_t lba, unsigned int len) { unsigned int granularity, alignment; sector_t end = lba + len; granularity = scsi_debug_unmap_granularity; alignment = granularity - scsi_debug_unmap_alignment; while (lba < end) { sector_t block, rem; block = lba + alignment; rem = do_div(block, granularity); if (block < map_size) set_bit(block, map_storep); lba += granularity - rem; } } static void unmap_region(sector_t lba, unsigned int len) { unsigned int granularity, alignment; sector_t end = lba + len; granularity = scsi_debug_unmap_granularity; alignment = granularity - scsi_debug_unmap_alignment; while (lba < end) { sector_t block, rem; block = lba + alignment; rem = do_div(block, granularity); if (rem == 0 && lba + granularity <= end && block < map_size) clear_bit(block, map_storep); lba += granularity - rem; } } static int resp_write(struct scsi_cmnd *SCpnt, unsigned 
long long lba, unsigned int num, struct sdebug_dev_info *devip, u32 ei_lba) { unsigned long iflags; int ret; ret = check_device_access_params(devip, lba, num); if (ret) return ret; /* DIX + T10 DIF */ if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) { int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba); if (prot_ret) { mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret); return illegal_condition_result; } } write_lock_irqsave(&atomic_rw, iflags); ret = do_device_access(SCpnt, devip, lba, num, 1); if (scsi_debug_unmap_granularity) map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); if (-1 == ret) return (DID_ERROR << 16); else if ((ret < (num * scsi_debug_sector_size)) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, " " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); return 0; } static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, unsigned int num, struct sdebug_dev_info *devip, u32 ei_lba, unsigned int unmap) { unsigned long iflags; unsigned long long i; int ret; ret = check_device_access_params(devip, lba, num); if (ret) return ret; if (num > scsi_debug_write_same_length) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } write_lock_irqsave(&atomic_rw, iflags); if (unmap && scsi_debug_unmap_granularity) { unmap_region(lba, num); goto out; } /* Else fetch one logical block */ ret = fetch_to_dev_buffer(scmd, fake_storep + (lba * scsi_debug_sector_size), scsi_debug_sector_size); if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return (DID_ERROR << 16); } else if ((ret < (num * scsi_debug_sector_size)) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, " " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret); /* Copy first sector to remaining blocks */ for (i = 1 ; i < num ; i++) memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size), 
fake_storep + (lba * scsi_debug_sector_size), scsi_debug_sector_size); if (scsi_debug_unmap_granularity) map_region(lba, num); out: write_unlock_irqrestore(&atomic_rw, iflags); return 0; } struct unmap_block_desc { __be64 lba; __be32 blocks; __be32 __reserved; }; static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) { unsigned char *buf; struct unmap_block_desc *desc; unsigned int i, payload_len, descriptors; int ret; ret = check_readiness(scmd, 1, devip); if (ret) return ret; payload_len = get_unaligned_be16(&scmd->cmnd[7]); BUG_ON(scsi_bufflen(scmd) != payload_len); descriptors = (payload_len - 8) / 16; buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC); if (!buf) return check_condition_result; scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2); BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16); desc = (void *)&buf[8]; for (i = 0 ; i < descriptors ; i++) { unsigned long long lba = get_unaligned_be64(&desc[i].lba); unsigned int num = get_unaligned_be32(&desc[i].blocks); ret = check_device_access_params(devip, lba, num); if (ret) goto out; unmap_region(lba, num); } ret = 0; out: kfree(buf); return ret; } #define SDEBUG_GET_LBA_STATUS_LEN 32 static int resp_get_lba_status(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip) { unsigned long long lba; unsigned int alloc_len, mapped, num; unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN]; int ret; ret = check_readiness(scmd, 1, devip); if (ret) return ret; lba = get_unaligned_be64(&scmd->cmnd[2]); alloc_len = get_unaligned_be32(&scmd->cmnd[10]); if (alloc_len < 24) return 0; ret = check_device_access_params(devip, lba, 1); if (ret) return ret; mapped = map_state(lba, &num); memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN); put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */ put_unaligned_be64(lba, &arr[8]); /* LBA */ put_unaligned_be32(num, &arr[16]); /* Number of blocks */ arr[20] = !mapped; /* mapped = 0, unmapped = 1 */ return 
fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN); } #define SDEBUG_RLUN_ARR_SZ 256 static int resp_report_luns(struct scsi_cmnd * scp, struct sdebug_dev_info * devip) { unsigned int alloc_len; int lun_cnt, i, upper, num, n, wlun, lun; unsigned char *cmd = (unsigned char *)scp->cmnd; int select_report = (int)cmd[2]; struct scsi_lun *one_lun; unsigned char arr[SDEBUG_RLUN_ARR_SZ]; unsigned char * max_addr; alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24); if ((alloc_len < 4) || (select_report > 2)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } /* can produce response with up to 16k luns (lun 0 to lun 16383) */ memset(arr, 0, SDEBUG_RLUN_ARR_SZ); lun_cnt = scsi_debug_max_luns; if (1 == select_report) lun_cnt = 0; else if (scsi_debug_no_lun_0 && (lun_cnt > 0)) --lun_cnt; wlun = (select_report > 0) ? 1 : 0; num = lun_cnt + wlun; arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff; arr[3] = (sizeof(struct scsi_lun) * num) & 0xff; n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) / sizeof(struct scsi_lun)), num); if (n < num) { wlun = 0; lun_cnt = n; } one_lun = (struct scsi_lun *) &arr[8]; max_addr = arr + SDEBUG_RLUN_ARR_SZ; for (i = 0, lun = (scsi_debug_no_lun_0 ? 
1 : 0); ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr)); i++, lun++) { upper = (lun >> 8) & 0x3f; if (upper) one_lun[i].scsi_lun[0] = (upper | (SAM2_LUN_ADDRESS_METHOD << 6)); one_lun[i].scsi_lun[1] = lun & 0xff; } if (wlun) { one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff; one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff; i++; } alloc_len = (unsigned char *)(one_lun + i) - arr; return fill_from_dev_buffer(scp, arr, min((int)alloc_len, SDEBUG_RLUN_ARR_SZ)); } static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, unsigned int num, struct sdebug_dev_info *devip) { int i, j, ret = -1; unsigned char *kaddr, *buf; unsigned int offset; struct scatterlist *sg; struct scsi_data_buffer *sdb = scsi_in(scp); /* better not to use temporary buffer. */ buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); if (!buf) return ret; scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp)); offset = 0; for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0); if (!kaddr) goto out; for (j = 0; j < sg->length; j++) *(kaddr + sg->offset + j) ^= *(buf + offset + j); offset += sg->length; kunmap_atomic(kaddr, KM_USER0); } ret = 0; out: kfree(buf); return ret; } /* When timer goes off this function is called. */ static void timer_intr_handler(unsigned long indx) { struct sdebug_queued_cmd * sqcp; unsigned long iflags; if (indx >= scsi_debug_max_queue) { printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too " "large\n"); return; } spin_lock_irqsave(&queued_arr_lock, iflags); sqcp = &queued_arr[(int)indx]; if (! 
sqcp->in_use) { printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected " "interrupt\n"); spin_unlock_irqrestore(&queued_arr_lock, iflags); return; } sqcp->in_use = 0; if (sqcp->done_funct) { sqcp->a_cmnd->result = sqcp->scsi_result; sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */ } sqcp->done_funct = NULL; spin_unlock_irqrestore(&queued_arr_lock, iflags); } static struct sdebug_dev_info * sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags) { struct sdebug_dev_info *devip; devip = kzalloc(sizeof(*devip), flags); if (devip) { devip->sdbg_host = sdbg_host; list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list); } return devip; } static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev) { struct sdebug_host_info * sdbg_host; struct sdebug_dev_info * open_devip = NULL; struct sdebug_dev_info * devip = (struct sdebug_dev_info *)sdev->hostdata; if (devip) return devip; sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host); if (!sdbg_host) { printk(KERN_ERR "Host info NULL\n"); return NULL; } list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) { if ((devip->used) && (devip->channel == sdev->channel) && (devip->target == sdev->id) && (devip->lun == sdev->lun)) return devip; else { if ((!devip->used) && (!open_devip)) open_devip = devip; } } if (!open_devip) { /* try and make a new one */ open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC); if (!open_devip) { printk(KERN_ERR "%s: out of memory at line %d\n", __func__, __LINE__); return NULL; } } open_devip->channel = sdev->channel; open_devip->target = sdev->id; open_devip->lun = sdev->lun; open_devip->sdbg_host = sdbg_host; open_devip->reset = 1; open_devip->used = 1; memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN); if (scsi_debug_dsense) open_devip->sense_buff[0] = 0x72; else { open_devip->sense_buff[0] = 0x70; open_devip->sense_buff[7] = 0xa; } if (sdev->lun == SAM2_WLUN_REPORT_LUNS) open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff; return 
open_devip; } static int scsi_debug_slave_alloc(struct scsi_device *sdp) { if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); return 0; } static int scsi_debug_slave_configure(struct scsi_device *sdp) { struct sdebug_dev_info *devip; if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN) sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN; devip = devInfoReg(sdp); if (NULL == devip) return 1; /* no resources, will be marked offline */ sdp->hostdata = devip; if (sdp->host->cmd_per_lun) scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING, sdp->host->cmd_per_lun); blk_queue_max_segment_size(sdp->request_queue, 256 * 1024); if (scsi_debug_no_uld) sdp->no_uld_attach = 1; return 0; } static void scsi_debug_slave_destroy(struct scsi_device *sdp) { struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata; if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); if (devip) { /* make this slot avaliable for re-use */ devip->used = 0; sdp->hostdata = NULL; } } /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */ static int stop_queued_cmnd(struct scsi_cmnd *cmnd) { unsigned long iflags; int k; struct sdebug_queued_cmd *sqcp; spin_lock_irqsave(&queued_arr_lock, iflags); for (k = 0; k < scsi_debug_max_queue; ++k) { sqcp = &queued_arr[k]; if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) { del_timer_sync(&sqcp->cmnd_timer); sqcp->in_use = 0; sqcp->a_cmnd = NULL; break; } } spin_unlock_irqrestore(&queued_arr_lock, iflags); return (k < scsi_debug_max_queue) ? 
1 : 0; } /* Deletes (stops) timers of all queued commands */ static void stop_all_queued(void) { unsigned long iflags; int k; struct sdebug_queued_cmd *sqcp; spin_lock_irqsave(&queued_arr_lock, iflags); for (k = 0; k < scsi_debug_max_queue; ++k) { sqcp = &queued_arr[k]; if (sqcp->in_use && sqcp->a_cmnd) { del_timer_sync(&sqcp->cmnd_timer); sqcp->in_use = 0; sqcp->a_cmnd = NULL; } } spin_unlock_irqrestore(&queued_arr_lock, iflags); } static int scsi_debug_abort(struct scsi_cmnd * SCpnt) { if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: abort\n"); ++num_aborts; stop_queued_cmnd(SCpnt); return SUCCESS; } static int scsi_debug_biosparam(struct scsi_device *sdev, struct block_device * bdev, sector_t capacity, int *info) { int res; unsigned char *buf; if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: biosparam\n"); buf = scsi_bios_ptable(bdev); if (buf) { res = scsi_partsize(buf, capacity, &info[2], &info[0], &info[1]); kfree(buf); if (! res) return res; } info[0] = sdebug_heads; info[1] = sdebug_sectors_per; info[2] = sdebug_cylinders_per; return 0; } static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt) { struct sdebug_dev_info * devip; if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: device_reset\n"); ++num_dev_resets; if (SCpnt) { devip = devInfoReg(SCpnt->device); if (devip) devip->reset = 1; } return SUCCESS; } static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt) { struct sdebug_host_info *sdbg_host; struct sdebug_dev_info * dev_info; struct scsi_device * sdp; struct Scsi_Host * hp; if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: bus_reset\n"); ++num_bus_resets; if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) { sdbg_host = *(struct sdebug_host_info **)shost_priv(hp); if (sdbg_host) { list_for_each_entry(dev_info, &sdbg_host->dev_info_list, dev_list) dev_info->reset = 1; } } return SUCCESS; } static int scsi_debug_host_reset(struct scsi_cmnd * 
SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *dev_info;

	if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
		printk(KERN_INFO "scsi_debug: host_reset\n");
	++num_host_resets;
	spin_lock(&sdebug_host_list_lock);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
				    dev_list)
			dev_info->reset = 1;
	}
	spin_unlock(&sdebug_host_list_lock);
	stop_all_queued();
	return SUCCESS;
}

/* Initializes timers in queued array */
static void __init init_all_queued(void)
{
	unsigned long iflags;
	int k;
	struct sdebug_queued_cmd *sqcp;

	spin_lock_irqsave(&queued_arr_lock, iflags);
	for (k = 0; k < scsi_debug_max_queue; ++k) {
		sqcp = &queued_arr[k];
		init_timer(&sqcp->cmnd_timer);
		sqcp->in_use = 0;
		sqcp->a_cmnd = NULL;
	}
	spin_unlock_irqrestore(&queued_arr_lock, iflags);
}

static void __init sdebug_build_parts(unsigned char *ramp,
				      unsigned long store_size)
{
	struct partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2];
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
		return;
	if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
		scsi_debug_num_parts = SDEBUG_MAX_PARTS;
		printk(KERN_WARNING "scsi_debug:build_parts: reducing "
		       "partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)sdebug_store_sectors;
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / scsi_debug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;
	starts[0] = sdebug_sectors_per;
	for (k = 1; k < scsi_debug_num_parts; ++k)
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
	starts[scsi_debug_num_parts] = num_sectors;
	starts[scsi_debug_num_parts + 1] = 0;
	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k + 1] - 1;
		pp->boot_ind = 0;
		pp->cyl = start_sec /
heads_by_sects; pp->head = (start_sec - (pp->cyl * heads_by_sects)) / sdebug_sectors_per; pp->sector = (start_sec % sdebug_sectors_per) + 1; pp->end_cyl = end_sec / heads_by_sects; pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects)) / sdebug_sectors_per; pp->end_sector = (end_sec % sdebug_sectors_per) + 1; pp->start_sect = start_sec; pp->nr_sects = end_sec - start_sec + 1; pp->sys_ind = 0x83; /* plain Linux partition */ } } static int schedule_resp(struct scsi_cmnd * cmnd, struct sdebug_dev_info * devip, done_funct_t done, int scsi_result, int delta_jiff) { if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) { if (scsi_result) { struct scsi_device * sdp = cmnd->device; printk(KERN_INFO "scsi_debug: <%u %u %u %u> " "non-zero result=0x%x\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun, scsi_result); } } if (cmnd && devip) { /* simulate autosense by this driver */ if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff)) memcpy(cmnd->sense_buffer, devip->sense_buff, (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ? SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE); } if (delta_jiff <= 0) { if (cmnd) cmnd->result = scsi_result; if (done) done(cmnd); return 0; } else { unsigned long iflags; int k; struct sdebug_queued_cmd * sqcp = NULL; spin_lock_irqsave(&queued_arr_lock, iflags); for (k = 0; k < scsi_debug_max_queue; ++k) { sqcp = &queued_arr[k]; if (! 
sqcp->in_use) break; } if (k >= scsi_debug_max_queue) { spin_unlock_irqrestore(&queued_arr_lock, iflags); printk(KERN_WARNING "scsi_debug: can_queue exceeded\n"); return 1; /* report busy to mid level */ } sqcp->in_use = 1; sqcp->a_cmnd = cmnd; sqcp->scsi_result = scsi_result; sqcp->done_funct = done; sqcp->cmnd_timer.function = timer_intr_handler; sqcp->cmnd_timer.data = k; sqcp->cmnd_timer.expires = jiffies + delta_jiff; add_timer(&sqcp->cmnd_timer); spin_unlock_irqrestore(&queued_arr_lock, iflags); if (cmnd) cmnd->result = 0; return 0; } } /* Note: The following macros create attribute files in the /sys/module/scsi_debug/parameters directory. Unfortunately this driver is unaware of a change and cannot trigger auxiliary actions as it can when the corresponding attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed. */ module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); module_param_named(ato, scsi_debug_ato, int, S_IRUGO); module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); module_param_named(dif, scsi_debug_dif, int, S_IRUGO); module_param_named(dix, scsi_debug_dix, int, S_IRUGO); module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); module_param_named(guard, scsi_debug_guard, int, S_IRUGO); module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); 
module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO); module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR); module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO); module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR); module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, S_IRUGO | S_IWUSR); module_param_named(write_same_length, scsi_debug_write_same_length, int, S_IRUGO | S_IWUSR); MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); MODULE_DESCRIPTION("SCSI debug adapter driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(SCSI_DEBUG_VERSION); MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)"); MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); MODULE_PARM_DESC(guard, 
"protection checksum: 0=crc, 1=ip (def=0)"); MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))"); MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))"); MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)"); MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)"); MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); static char sdebug_info[256]; static const char * scsi_debug_info(struct Scsi_Host * shp) { sprintf(sdebug_info, "scsi_debug, version %s [%s], " 
"dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_dev_size_mb, scsi_debug_opts); return sdebug_info; } /* scsi_debug_proc_info * Used if the driver currently has no own support for /proc/scsi */ static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { int len, pos, begin; int orig_length; orig_length = length; if (inout == 1) { char arr[16]; int minLen = length > 15 ? 15 : length; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; memcpy(arr, buffer, minLen); arr[minLen] = '\0'; if (1 != sscanf(arr, "%d", &pos)) return -EINVAL; scsi_debug_opts = pos; if (scsi_debug_every_nth != 0) scsi_debug_cmnd_count = 0; return length; } begin = 0; pos = len = sprintf(buffer, "scsi_debug adapter driver, version " "%s [%s]\n" "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, " "every_nth=%d(curr:%d)\n" "delay=%d, max_luns=%d, scsi_level=%d\n" "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n" "number of aborts=%d, device_reset=%d, bus_resets=%d, " "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n", SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth, scsi_debug_cmnd_count, scsi_debug_delay, scsi_debug_max_luns, scsi_debug_scsi_level, scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets, num_host_resets, dix_reads, dix_writes, dif_errors); if (pos < offset) { len = 0; begin = pos; } *start = buffer + (offset - begin); /* Start of wanted data */ len -= (offset - begin); if (len > length) len = length; return len; } static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay); } static ssize_t sdebug_delay_store(struct device_driver * ddp, const char * buf, size_t count) { int delay; char work[20]; if (1 == sscanf(buf, "%10s", 
work)) { if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) { scsi_debug_delay = delay; return count; } } return -EINVAL; } DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show, sdebug_delay_store); static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts); } static ssize_t sdebug_opts_store(struct device_driver * ddp, const char * buf, size_t count) { int opts; char work[20]; if (1 == sscanf(buf, "%10s", work)) { if (0 == strnicmp(work,"0x", 2)) { if (1 == sscanf(&work[2], "%x", &opts)) goto opts_done; } else { if (1 == sscanf(work, "%d", &opts)) goto opts_done; } } return -EINVAL; opts_done: scsi_debug_opts = opts; scsi_debug_cmnd_count = 0; return count; } DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show, sdebug_opts_store); static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype); } static ssize_t sdebug_ptype_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { scsi_debug_ptype = n; return count; } return -EINVAL; } DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store); static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense); } static ssize_t sdebug_dsense_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { scsi_debug_dsense = n; return count; } return -EINVAL; } DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show, sdebug_dsense_store); static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw); } static ssize_t sdebug_fake_rw_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", 
&n)) && (n >= 0)) { scsi_debug_fake_rw = n; return count; } return -EINVAL; } DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show, sdebug_fake_rw_store); static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0); } static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { scsi_debug_no_lun_0 = n; return count; } return -EINVAL; } DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show, sdebug_no_lun_0_store); static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts); } static ssize_t sdebug_num_tgts_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { scsi_debug_num_tgts = n; sdebug_max_tgts_luns(); return count; } return -EINVAL; } DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show, sdebug_num_tgts_store); static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb); } DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL); static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts); } DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL); static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth); } static ssize_t sdebug_every_nth_store(struct device_driver * ddp, const char * buf, size_t count) { int nth; if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { scsi_debug_every_nth = nth; scsi_debug_cmnd_count = 0; return count; } return -EINVAL; } DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show, 
sdebug_every_nth_store); static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns); } static ssize_t sdebug_max_luns_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { scsi_debug_max_luns = n; sdebug_max_tgts_luns(); return count; } return -EINVAL; } DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show, sdebug_max_luns_store); static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue); } static ssize_t sdebug_max_queue_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) && (n <= SCSI_DEBUG_CANQUEUE)) { scsi_debug_max_queue = n; return count; } return -EINVAL; } DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show, sdebug_max_queue_store); static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld); } DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL); static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level); } DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL); static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb); } static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { scsi_debug_virtual_gb = n; sdebug_capacity = get_sdebug_capacity(); return count; } return -EINVAL; } DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show, sdebug_virtual_gb_store); static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf) { 
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host); } static ssize_t sdebug_add_host_store(struct device_driver * ddp, const char * buf, size_t count) { int delta_hosts; if (sscanf(buf, "%d", &delta_hosts) != 1) return -EINVAL; if (delta_hosts > 0) { do { sdebug_add_adapter(); } while (--delta_hosts); } else if (delta_hosts < 0) { do { sdebug_remove_adapter(); } while (++delta_hosts); } return count; } DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, sdebug_add_host_store); static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno); } static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp, const char * buf, size_t count) { int n; if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) { scsi_debug_vpd_use_hostno = n; return count; } return -EINVAL; } DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show, sdebug_vpd_use_hostno_store); static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf) { return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size); } DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL); static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix); } DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL); static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif); } DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL); static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard); } DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL); static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf) { return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato); } DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL); static ssize_t sdebug_map_show(struct 
device_driver *ddp, char *buf) { ssize_t count; if (!scsi_debug_lbp()) return scnprintf(buf, PAGE_SIZE, "0-%u\n", sdebug_store_sectors); count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size); buf[count++] = '\n'; buf[count++] = 0; return count; } DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL); /* Note: The following function creates attribute files in the /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these files (over those found in the /sys/module/scsi_debug/parameters directory) is that auxiliary actions can be triggered when an attribute is changed. For example see: sdebug_add_host_store() above. */ static int do_create_driverfs_files(void) { int ret; ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); ret |= 
driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato); ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map); return ret; } static void do_remove_driverfs_files(void) { driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay); 
driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host); } static void pseudo_0_release(struct device *dev) { if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n"); } static struct device pseudo_primary = { .init_name = "pseudo_0", .release = pseudo_0_release, }; static int __init scsi_debug_init(void) { unsigned long sz; int host_to_add; int k; int ret; switch (scsi_debug_sector_size) { case 512: case 1024: case 2048: case 4096: break; default: printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n", scsi_debug_sector_size); return -EINVAL; } switch (scsi_debug_dif) { case SD_DIF_TYPE0_PROTECTION: case SD_DIF_TYPE1_PROTECTION: case SD_DIF_TYPE2_PROTECTION: case SD_DIF_TYPE3_PROTECTION: break; default: printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n"); return -EINVAL; } if (scsi_debug_guard > 1) { printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n"); return -EINVAL; } if (scsi_debug_ato > 1) { printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n"); return -EINVAL; } if (scsi_debug_physblk_exp > 15) { printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n", scsi_debug_physblk_exp); return -EINVAL; } if (scsi_debug_lowest_aligned > 0x3fff) { printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n", scsi_debug_lowest_aligned); return -EINVAL; } if (scsi_debug_dev_size_mb < 1) scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */ sz = (unsigned long)scsi_debug_dev_size_mb * 1048576; sdebug_store_sectors = sz / scsi_debug_sector_size; sdebug_capacity = get_sdebug_capacity(); /* play around with geometry, don't waste too much on track 0 */ sdebug_heads = 8; sdebug_sectors_per = 32; if (scsi_debug_dev_size_mb >= 16) sdebug_heads = 32; else if (scsi_debug_dev_size_mb >= 256) sdebug_heads = 64; sdebug_cylinders_per = (unsigned long)sdebug_capacity / (sdebug_sectors_per * sdebug_heads); if (sdebug_cylinders_per >= 1024) { /* other LLDs do this; implies >= 1GB ram disk ... 
*/ sdebug_heads = 255; sdebug_sectors_per = 63; sdebug_cylinders_per = (unsigned long)sdebug_capacity / (sdebug_sectors_per * sdebug_heads); } fake_storep = vmalloc(sz); if (NULL == fake_storep) { printk(KERN_ERR "scsi_debug_init: out of memory, 1\n"); return -ENOMEM; } memset(fake_storep, 0, sz); if (scsi_debug_num_parts > 0) sdebug_build_parts(fake_storep, sz); if (scsi_debug_dif) { int dif_size; dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); dif_storep = vmalloc(dif_size); printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n", dif_size, dif_storep); if (dif_storep == NULL) { printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n"); ret = -ENOMEM; goto free_vm; } memset(dif_storep, 0xff, dif_size); } /* Logical Block Provisioning */ if (scsi_debug_lbp()) { unsigned int map_bytes; scsi_debug_unmap_max_blocks = clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); scsi_debug_unmap_max_desc = clamp(scsi_debug_unmap_max_desc, 0U, 256U); scsi_debug_unmap_granularity = clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); if (scsi_debug_unmap_alignment && scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { printk(KERN_ERR "%s: ERR: unmap_granularity < unmap_alignment\n", __func__); return -EINVAL; } map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); map_bytes = map_size >> 3; map_storep = vmalloc(map_bytes); printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", map_size); if (map_storep == NULL) { printk(KERN_ERR "scsi_debug_init: out of mem. 
(MAP)\n"); ret = -ENOMEM; goto free_vm; } memset(map_storep, 0x0, map_bytes); /* Map first 1KB for partition table */ if (scsi_debug_num_parts) map_region(0, 2); } ret = device_register(&pseudo_primary); if (ret < 0) { printk(KERN_WARNING "scsi_debug: device_register error: %d\n", ret); goto free_vm; } ret = bus_register(&pseudo_lld_bus); if (ret < 0) { printk(KERN_WARNING "scsi_debug: bus_register error: %d\n", ret); goto dev_unreg; } ret = driver_register(&sdebug_driverfs_driver); if (ret < 0) { printk(KERN_WARNING "scsi_debug: driver_register error: %d\n", ret); goto bus_unreg; } ret = do_create_driverfs_files(); if (ret < 0) { printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n", ret); goto del_files; } init_all_queued(); host_to_add = scsi_debug_add_host; scsi_debug_add_host = 0; for (k = 0; k < host_to_add; k++) { if (sdebug_add_adapter()) { printk(KERN_ERR "scsi_debug_init: " "sdebug_add_adapter failed k=%d\n", k); break; } } if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) { printk(KERN_INFO "scsi_debug_init: built %d host(s)\n", scsi_debug_add_host); } return 0; del_files: do_remove_driverfs_files(); driver_unregister(&sdebug_driverfs_driver); bus_unreg: bus_unregister(&pseudo_lld_bus); dev_unreg: device_unregister(&pseudo_primary); free_vm: if (map_storep) vfree(map_storep); if (dif_storep) vfree(dif_storep); vfree(fake_storep); return ret; } static void __exit scsi_debug_exit(void) { int k = scsi_debug_add_host; stop_all_queued(); for (; k; k--) sdebug_remove_adapter(); do_remove_driverfs_files(); driver_unregister(&sdebug_driverfs_driver); bus_unregister(&pseudo_lld_bus); device_unregister(&pseudo_primary); if (dif_storep) vfree(dif_storep); vfree(fake_storep); } device_initcall(scsi_debug_init); module_exit(scsi_debug_exit); static void sdebug_release_adapter(struct device * dev) { struct sdebug_host_info *sdbg_host; sdbg_host = to_sdebug_host(dev); kfree(sdbg_host); } static int sdebug_add_adapter(void) { int k, devs_per_host; int error = 0; 
struct sdebug_host_info *sdbg_host; struct sdebug_dev_info *sdbg_devinfo, *tmp; sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); if (NULL == sdbg_host) { printk(KERN_ERR "%s: out of memory at line %d\n", __func__, __LINE__); return -ENOMEM; } INIT_LIST_HEAD(&sdbg_host->dev_info_list); devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns; for (k = 0; k < devs_per_host; k++) { sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); if (!sdbg_devinfo) { printk(KERN_ERR "%s: out of memory at line %d\n", __func__, __LINE__); error = -ENOMEM; goto clean; } } spin_lock(&sdebug_host_list_lock); list_add_tail(&sdbg_host->host_list, &sdebug_host_list); spin_unlock(&sdebug_host_list_lock); sdbg_host->dev.bus = &pseudo_lld_bus; sdbg_host->dev.parent = &pseudo_primary; sdbg_host->dev.release = &sdebug_release_adapter; dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host); error = device_register(&sdbg_host->dev); if (error) goto clean; ++scsi_debug_add_host; return error; clean: list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, dev_list) { list_del(&sdbg_devinfo->dev_list); kfree(sdbg_devinfo); } kfree(sdbg_host); return error; } static void sdebug_remove_adapter(void) { struct sdebug_host_info * sdbg_host = NULL; spin_lock(&sdebug_host_list_lock); if (!list_empty(&sdebug_host_list)) { sdbg_host = list_entry(sdebug_host_list.prev, struct sdebug_host_info, host_list); list_del(&sdbg_host->host_list); } spin_unlock(&sdebug_host_list_lock); if (!sdbg_host) return; device_unregister(&sdbg_host->dev); --scsi_debug_add_host; } static int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) { unsigned char *cmd = (unsigned char *) SCpnt->cmnd; int len, k; unsigned int num; unsigned long long lba; u32 ei_lba; int errsts = 0; int target = SCpnt->device->id; struct sdebug_dev_info *devip = NULL; int inj_recovered = 0; int inj_transport = 0; int inj_dif = 0; int inj_dix = 0; int delay_override = 0; int unmap = 0; 
scsi_set_resid(SCpnt, 0); if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) { printk(KERN_INFO "scsi_debug: cmd "); for (k = 0, len = SCpnt->cmd_len; k < len; ++k) printk("%02x ", (int)cmd[k]); printk("\n"); } if (target == SCpnt->device->host->hostt->this_id) { printk(KERN_INFO "scsi_debug: initiator's id used as " "target!\n"); return schedule_resp(SCpnt, NULL, done, DID_NO_CONNECT << 16, 0); } if ((SCpnt->device->lun >= scsi_debug_max_luns) && (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS)) return schedule_resp(SCpnt, NULL, done, DID_NO_CONNECT << 16, 0); devip = devInfoReg(SCpnt->device); if (NULL == devip) return schedule_resp(SCpnt, NULL, done, DID_NO_CONNECT << 16, 0); if ((scsi_debug_every_nth != 0) && (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) { scsi_debug_cmnd_count = 0; if (scsi_debug_every_nth < -1) scsi_debug_every_nth = -1; if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts) return 0; /* ignore command causing timeout */ else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts) inj_recovered = 1; /* to reads and writes below */ else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts) inj_transport = 1; /* to reads and writes below */ else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts) inj_dif = 1; /* to reads and writes below */ else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts) inj_dix = 1; /* to reads and writes below */ } if (devip->wlun) { switch (*cmd) { case INQUIRY: case REQUEST_SENSE: case TEST_UNIT_READY: case REPORT_LUNS: break; /* only allowable wlun commands */ default: if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: Opcode: 0x%x " "not supported for wlun\n", *cmd); mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); errsts = check_condition_result; return schedule_resp(SCpnt, devip, done, errsts, 0); } } switch (*cmd) { case INQUIRY: /* mandatory, ignore unit attention */ delay_override = 1; errsts = resp_inquiry(SCpnt, target, devip); break; case REQUEST_SENSE: /* mandatory, ignore unit attention 
*/ delay_override = 1; errsts = resp_requests(SCpnt, devip); break; case REZERO_UNIT: /* actually this is REWIND for SSC */ case START_STOP: errsts = resp_start_stop(SCpnt, devip); break; case ALLOW_MEDIUM_REMOVAL: errsts = check_readiness(SCpnt, 1, devip); if (errsts) break; if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: Medium removal %s\n", cmd[4] ? "inhibited" : "enabled"); break; case SEND_DIAGNOSTIC: /* mandatory */ errsts = check_readiness(SCpnt, 1, devip); break; case TEST_UNIT_READY: /* mandatory */ delay_override = 1; errsts = check_readiness(SCpnt, 0, devip); break; case RESERVE: errsts = check_readiness(SCpnt, 1, devip); break; case RESERVE_10: errsts = check_readiness(SCpnt, 1, devip); break; case RELEASE: errsts = check_readiness(SCpnt, 1, devip); break; case RELEASE_10: errsts = check_readiness(SCpnt, 1, devip); break; case READ_CAPACITY: errsts = resp_readcap(SCpnt, devip); break; case SERVICE_ACTION_IN: if (cmd[1] == SAI_READ_CAPACITY_16) errsts = resp_readcap16(SCpnt, devip); else if (cmd[1] == SAI_GET_LBA_STATUS) { if (scsi_debug_lbp() == 0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); errsts = check_condition_result; } else errsts = resp_get_lba_status(SCpnt, devip); } else { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); errsts = check_condition_result; } break; case MAINTENANCE_IN: if (MI_REPORT_TARGET_PGS != cmd[1]) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); errsts = check_condition_result; break; } errsts = resp_report_tgtpgs(SCpnt, devip); break; case READ_16: case READ_12: case READ_10: /* READ{10,12,16} and DIF Type 2 are natural enemies */ if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && cmd[1] & 0xe0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); errsts = check_condition_result; break; } if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && (cmd[1] & 0xe0) == 0) printk(KERN_ERR 
"Unprotected RD/WR to DIF device\n"); /* fall through */ case READ_6: read: errsts = check_readiness(SCpnt, 0, devip); if (errsts) break; if (scsi_debug_fake_rw) break; get_data_transfer_info(cmd, &lba, &num, &ei_lba); errsts = resp_read(SCpnt, lba, num, devip, ei_lba); if (inj_recovered && (0 == errsts)) { mk_sense_buffer(devip, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0); errsts = check_condition_result; } else if (inj_transport && (0 == errsts)) { mk_sense_buffer(devip, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO); errsts = check_condition_result; } else if (inj_dif && (0 == errsts)) { mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1); errsts = illegal_condition_result; } else if (inj_dix && (0 == errsts)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1); errsts = illegal_condition_result; } break; case REPORT_LUNS: /* mandatory, ignore unit attention */ delay_override = 1; errsts = resp_report_luns(SCpnt, devip); break; case VERIFY: /* 10 byte SBC-2 command */ errsts = check_readiness(SCpnt, 0, devip); break; case WRITE_16: case WRITE_12: case WRITE_10: /* WRITE{10,12,16} and DIF Type 2 are natural enemies */ if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && cmd[1] & 0xe0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); errsts = check_condition_result; break; } if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION || scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) && (cmd[1] & 0xe0) == 0) printk(KERN_ERR "Unprotected RD/WR to DIF device\n"); /* fall through */ case WRITE_6: write: errsts = check_readiness(SCpnt, 0, devip); if (errsts) break; if (scsi_debug_fake_rw) break; get_data_transfer_info(cmd, &lba, &num, &ei_lba); errsts = resp_write(SCpnt, lba, num, devip, ei_lba); if (inj_recovered && (0 == errsts)) { mk_sense_buffer(devip, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0); errsts = check_condition_result; } else if (inj_dif && (0 == errsts)) { mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1); errsts = illegal_condition_result; } else if (inj_dix && (0 
== errsts)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1); errsts = illegal_condition_result; } break; case WRITE_SAME_16: case WRITE_SAME: if (cmd[1] & 0x8) { if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) || (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); errsts = check_condition_result; } else unmap = 1; } if (errsts) break; errsts = check_readiness(SCpnt, 0, devip); if (errsts) break; get_data_transfer_info(cmd, &lba, &num, &ei_lba); errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap); break; case UNMAP: errsts = check_readiness(SCpnt, 0, devip); if (errsts) break; if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); errsts = check_condition_result; } else errsts = resp_unmap(SCpnt, devip); break; case MODE_SENSE: case MODE_SENSE_10: errsts = resp_mode_sense(SCpnt, target, devip); break; case MODE_SELECT: errsts = resp_mode_select(SCpnt, 1, devip); break; case MODE_SELECT_10: errsts = resp_mode_select(SCpnt, 0, devip); break; case LOG_SENSE: errsts = resp_log_sense(SCpnt, devip); break; case SYNCHRONIZE_CACHE: delay_override = 1; errsts = check_readiness(SCpnt, 0, devip); break; case WRITE_BUFFER: errsts = check_readiness(SCpnt, 1, devip); break; case XDWRITEREAD_10: if (!scsi_bidi_cmnd(SCpnt)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); errsts = check_condition_result; break; } errsts = check_readiness(SCpnt, 0, devip); if (errsts) break; if (scsi_debug_fake_rw) break; get_data_transfer_info(cmd, &lba, &num, &ei_lba); errsts = resp_read(SCpnt, lba, num, devip, ei_lba); if (errsts) break; errsts = resp_write(SCpnt, lba, num, devip, ei_lba); if (errsts) break; errsts = resp_xdwriteread(SCpnt, lba, num, devip); break; case VARIABLE_LENGTH_CMD: if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) { if ((cmd[10] & 0xe0) == 0) printk(KERN_ERR "Unprotected RD/WR to DIF device\n"); if 
(cmd[9] == READ_32) { BUG_ON(SCpnt->cmd_len < 32); goto read; } if (cmd[9] == WRITE_32) { BUG_ON(SCpnt->cmd_len < 32); goto write; } } mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); errsts = check_condition_result; break; default: if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) printk(KERN_INFO "scsi_debug: Opcode: 0x%x not " "supported\n", *cmd); errsts = check_readiness(SCpnt, 1, devip); if (errsts) break; /* Unit attention takes precedence */ mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0); errsts = check_condition_result; break; } return schedule_resp(SCpnt, devip, done, errsts, (delay_override ? 0 : scsi_debug_delay)); } static struct scsi_host_template sdebug_driver_template = { .proc_info = scsi_debug_proc_info, .proc_name = sdebug_proc_name, .name = "SCSI DEBUG", .info = scsi_debug_info, .slave_alloc = scsi_debug_slave_alloc, .slave_configure = scsi_debug_slave_configure, .slave_destroy = scsi_debug_slave_destroy, .ioctl = scsi_debug_ioctl, .queuecommand = scsi_debug_queuecommand, .eh_abort_handler = scsi_debug_abort, .eh_bus_reset_handler = scsi_debug_bus_reset, .eh_device_reset_handler = scsi_debug_device_reset, .eh_host_reset_handler = scsi_debug_host_reset, .bios_param = scsi_debug_biosparam, .can_queue = SCSI_DEBUG_CANQUEUE, .this_id = 7, .sg_tablesize = 256, .cmd_per_lun = 16, .max_sectors = 0xffff, .use_clustering = DISABLE_CLUSTERING, .module = THIS_MODULE, }; static int sdebug_driver_probe(struct device * dev) { int error = 0; struct sdebug_host_info *sdbg_host; struct Scsi_Host *hpnt; int host_prot; sdbg_host = to_sdebug_host(dev); sdebug_driver_template.can_queue = scsi_debug_max_queue; hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); if (NULL == hpnt) { printk(KERN_ERR "%s: scsi_register failed\n", __func__); error = -ENODEV; return error; } sdbg_host->shost = hpnt; *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host; if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id)) 
hpnt->max_id = scsi_debug_num_tgts + 1; else hpnt->max_id = scsi_debug_num_tgts; hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */ host_prot = 0; switch (scsi_debug_dif) { case SD_DIF_TYPE1_PROTECTION: host_prot = SHOST_DIF_TYPE1_PROTECTION; if (scsi_debug_dix) host_prot |= SHOST_DIX_TYPE1_PROTECTION; break; case SD_DIF_TYPE2_PROTECTION: host_prot = SHOST_DIF_TYPE2_PROTECTION; if (scsi_debug_dix) host_prot |= SHOST_DIX_TYPE2_PROTECTION; break; case SD_DIF_TYPE3_PROTECTION: host_prot = SHOST_DIF_TYPE3_PROTECTION; if (scsi_debug_dix) host_prot |= SHOST_DIX_TYPE3_PROTECTION; break; default: if (scsi_debug_dix) host_prot |= SHOST_DIX_TYPE0_PROTECTION; break; } scsi_host_set_prot(hpnt, host_prot); printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n", (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? 
" DIX3" : ""); if (scsi_debug_guard == 1) scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP); else scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC); error = scsi_add_host(hpnt, &sdbg_host->dev); if (error) { printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); error = -ENODEV; scsi_host_put(hpnt); } else scsi_scan_host(hpnt); return error; } static int sdebug_driver_remove(struct device * dev) { struct sdebug_host_info *sdbg_host; struct sdebug_dev_info *sdbg_devinfo, *tmp; sdbg_host = to_sdebug_host(dev); if (!sdbg_host) { printk(KERN_ERR "%s: Unable to locate host info\n", __func__); return -ENODEV; } scsi_remove_host(sdbg_host->shost); list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list, dev_list) { list_del(&sdbg_devinfo->dev_list); kfree(sdbg_devinfo); } scsi_host_put(sdbg_host->shost); return 0; } static int pseudo_lld_bus_match(struct device *dev, struct device_driver *dev_driver) { return 1; } static struct bus_type pseudo_lld_bus = { .name = "pseudo", .match = pseudo_lld_bus_match, .probe = sdebug_driver_probe, .remove = sdebug_driver_remove, };
gpl-2.0
googyanas/Googy-Max-N4-Kernel
drivers/net/wireless/bcmdhd4358/dhd_msgbuf.c
18
131424
/* * Header file describing the internal (inter-module) DHD interfaces. * * Provides type definitions and function prototypes used to link the * DHD OS, bus, and protocol modules. * * Copyright (C) 1999-2014, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * * $Id: dhd_msgbuf.c 503126 2014-09-17 11:58:42Z $ */ #include <typedefs.h> #include <osl.h> #include <bcmutils.h> #include <bcmmsgbuf.h> #include <bcmendian.h> #include <dngl_stats.h> #include <dhd.h> #include <dhd_proto.h> #include <dhd_bus.h> #include <dhd_dbg.h> #include <siutils.h> #include <dhd_flowring.h> #include <pcie_core.h> #include <bcmpcie.h> #include <dhd_pcie.h> /* * PCIE D2H DMA Complete Sync Modes * * Firmware may interrupt the host, prior to the D2H Mem2Mem DMA completes into * Host system memory. A WAR using one of 3 approaches is needed: * 1. Dongle places ia modulo-253 seqnum in last word of each D2H message * 2. XOR Checksum, with epoch# in each work item. 
Dongle builds an XOR checksum * writes in the last word of each work item. Each work item has a seqnum * number = sequence num % 253. * 3. Read Barrier: Dongle does a host memory read access prior to posting an * interrupt. * Host does not participate with option #3, other than reserving a host system * memory location for the dongle to read. */ #define PCIE_D2H_SYNC #define PCIE_D2H_SYNC_WAIT_TRIES 256 #define RETRIES 2 /* # of retries to retrieve matching ioctl response */ #define IOCTL_HDR_LEN 12 #define DEFAULT_RX_BUFFERS_TO_POST 256 #define RXBUFPOST_THRESHOLD 32 #define RX_BUF_BURST 16 #define DHD_STOP_QUEUE_THRESHOLD 200 #define DHD_START_QUEUE_THRESHOLD 100 #define MODX(x, n) ((x) & ((n) -1)) #define align(x, n) (MODX(x, n) ? ((x) - MODX(x, n) + (n)) : ((x) - MODX(x, n))) #define RX_DMA_OFFSET 8 #define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN) #define DMA_D2H_SCRATCH_BUF_LEN 8 #define DMA_ALIGN_LEN 4 #define DMA_XFER_LEN_LIMIT 0x400000 #define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192 #define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1 #define DHD_FLOWRING_MAX_EVENTBUF_POST 8 #define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8 #define DHD_PROT_FUNCS 22 typedef struct dhd_mem_map { void *va; dmaaddr_t pa; void *dmah; } dhd_mem_map_t; typedef struct dhd_dmaxfer { dhd_mem_map_t srcmem; dhd_mem_map_t destmem; uint32 len; uint32 srcdelay; uint32 destdelay; } dhd_dmaxfer_t; #define TXP_FLUSH_NITEMS #define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48 typedef struct msgbuf_ring { bool inited; uint16 idx; uchar name[24]; dhd_mem_map_t ring_base; #ifdef TXP_FLUSH_NITEMS void* start_addr; uint16 pend_items_count; #endif /* TXP_FLUSH_NITEMS */ ring_mem_t *ringmem; ring_state_t *ringstate; #if defined(PCIE_D2H_SYNC) uint32 seqnum; #endif /* PCIE_D2H_SYNC */ } msgbuf_ring_t; #if defined(PCIE_D2H_SYNC) /* Custom callback attached based upon D2H DMA Sync mode used in dongle. 
*/ typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen); #endif /* PCIE_D2H_SYNC */ typedef struct dhd_prot { osl_t *osh; /* OSL handle */ uint32 reqid; uint32 lastcmd; uint32 pending; uint16 rxbufpost; uint16 max_rxbufpost; uint16 max_eventbufpost; uint16 max_ioctlrespbufpost; uint16 cur_event_bufs_posted; uint16 cur_ioctlresp_bufs_posted; uint16 active_tx_count; uint16 max_tx_count; uint16 txp_threshold; /* Ring info */ msgbuf_ring_t *h2dring_txp_subn; msgbuf_ring_t *h2dring_rxp_subn; msgbuf_ring_t *h2dring_ctrl_subn; /* Cbuf handle for H2D ctrl ring */ msgbuf_ring_t *d2hring_tx_cpln; msgbuf_ring_t *d2hring_rx_cpln; msgbuf_ring_t *d2hring_ctrl_cpln; /* Cbuf handle for D2H ctrl ring */ uint32 rx_dataoffset; dhd_mem_map_t retbuf; dhd_mem_map_t ioctbuf; /* For holding ioct request buf */ dhd_mb_ring_t mb_ring_fn; uint32 d2h_dma_scratch_buf_len; /* For holding ioct request buf */ dhd_mem_map_t d2h_dma_scratch_buf; /* For holding ioct request buf */ uint32 h2d_dma_writeindx_buf_len; /* For holding dma ringupd buf - submission write */ dhd_mem_map_t h2d_dma_writeindx_buf; /* For holding dma ringupd buf - submission write */ uint32 h2d_dma_readindx_buf_len; /* For holding dma ringupd buf - submission read */ dhd_mem_map_t h2d_dma_readindx_buf; /* For holding dma ringupd buf - submission read */ uint32 d2h_dma_writeindx_buf_len; /* For holding dma ringupd buf - completion write */ dhd_mem_map_t d2h_dma_writeindx_buf; /* For holding dma ringupd buf - completion write */ uint32 d2h_dma_readindx_buf_len; /* For holding dma ringupd buf - completion read */ dhd_mem_map_t d2h_dma_readindx_buf; /* For holding dma ringupd buf - completion read */ #if defined(PCIE_D2H_SYNC) d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */ ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */ ulong d2h_sync_wait_tot; /* total wait loops */ #endif /* PCIE_D2H_SYNC */ dhd_dmaxfer_t dmaxfer; bool 
dmaxfer_in_progress; uint16 ioctl_seq_no; uint16 data_seq_no; uint16 ioctl_trans_id; void *pktid_map_handle; uint16 rx_metadata_offset; uint16 tx_metadata_offset; uint16 rx_cpln_early_upd_idx; struct mutex ioctl_mutex; /* Make IOCTL singleton in Prot Layer */ } dhd_prot_t; static int dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action); static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action); static int dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf); static int dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd); static int dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count); static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt); static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen); static void dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len); static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len); static int dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len); static void dhd_prot_noop(dhd_pub_t *dhd, void * buf, uint16 msglen); static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen); static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen); static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen); static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen); static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen); static void* dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 msglen, uint16 *alloced); static int dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx); static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type); #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_IOCTLBUF) static INLINE void 
dhd_prot_static_packet_free(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type); #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_IOCTLBUF */ static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type); static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma); static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, dhd_dmaxfer_t *dma); static void dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void *buf, uint16 msglen); static void dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen); static void dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen); static void dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen); #ifdef DHD_RX_CHAINING #define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \ (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \ !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \ !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \ !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \ ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \ ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \ (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6)))) static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain); static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx); static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd); #define DHD_PKT_CTF_MAX_CHAIN_LEN 64 #endif /* DHD_RX_CHAINING */ static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post); static int dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub); static int dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub); static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring); static void dhd_ring_init(dhd_pub_t *dhd, 
msgbuf_ring_t *ring); static msgbuf_ring_t* prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item, uint16 len_item, uint16 ringid); static void* prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced); static void dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index); static uint16 dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid); static void prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 len); static void prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring); static uint8* prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 *available_len); static void prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring); static void prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring); typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void * buf, uint16 msglen); static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = { dhd_prot_noop, /* 0 is invalid message type */ dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */ dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */ NULL, dhd_prot_process_flow_ring_create_response, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */ NULL, dhd_prot_process_flow_ring_delete_response, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */ NULL, dhd_prot_process_flow_ring_flush_response, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */ NULL, dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */ NULL, dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */ NULL, dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */ NULL, dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */ NULL, dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */ NULL, dhdmsgbuf_dmaxfer_compare, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */ NULL, }; #if defined(PCIE_D2H_SYNC) /* * D2H DMA to completion callback handlers. 
Based on the mode advertised by the * dongle through the PCIE shared region, the appropriate callback will be * registered in the proto layer to be invoked prior to precessing any message * from a D2H DMA ring. If the dongle uses a read barrier or another mode that * does not require host participation, then a noop callback handler will be * bound that simply returns the msgtype. */ static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 seqnum, uint32 tries, uchar *msg, int msglen); static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen); static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen); static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen); static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd, dhd_prot_t * prot); /* Debug print a livelock avert by dropping a D2H message */ static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 seqnum, uint32 tries, uchar *msg, int msglen) { DHD_ERROR(("LIVELOCK DHD<%p> seqnum<%u:%u> tries<%u> max<%lu> tot<%lu>\n", dhd, seqnum, seqnum% D2H_EPOCH_MODULO, tries, dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot)); prhex("D2H MsgBuf Failure", (uchar *)msg, msglen); #if defined(SUPPORT_LINKDOWN_RECOVERY) && defined(CONFIG_ARCH_MSM) dhd->bus->islinkdown = TRUE; dhd_os_check_hang(dhd, 0, -ETIMEDOUT); #endif /* SUPPORT_LINKDOWN_RECOVERY && CONFIG_ARCH_MSM */ } /* Sync on a D2H DMA to complete using SEQNUM mode */ static uint8 BCMFASTPATH dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen) { uint32 tries; uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; int num_words = msglen / sizeof(uint32); /* num of 32bit words */ volatile uint32 *marker = (uint32 *)msg + (num_words - 1); /* last word */ dhd_prot_t *prot = dhd->prot; ASSERT(msglen == RING_LEN_ITEMS(ring)); for (tries = 0; 
tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { uint32 msg_seqnum = *marker; if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma upto last word done */ ring->seqnum++; /* next expected sequence number */ goto dma_completed; } if (tries > prot->d2h_sync_wait_max) prot->d2h_sync_wait_max = tries; OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ } /* for PCIE_D2H_SYNC_WAIT_TRIES */ dhd_prot_d2h_sync_livelock(dhd, ring->seqnum, tries, (uchar *)msg, msglen); ring->seqnum++; /* skip this message ... leak of a pktid */ return 0; /* invalid msgtype 0 -> noop callback */ dma_completed: prot->d2h_sync_wait_tot += tries; return msg->msg_type; } /* Sync on a D2H DMA to complete using XORCSUM mode */ static uint8 BCMFASTPATH dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring, volatile cmn_msg_hdr_t *msg, int msglen) { uint32 tries; uint32 prot_checksum = 0; /* computed checksum */ int num_words = msglen / sizeof(uint32); /* num of 32bit words */ uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO; dhd_prot_t *prot = dhd->prot; ASSERT(msglen == RING_LEN_ITEMS(ring)); for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) { prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words); if (prot_checksum == 0U) { /* checksum is OK */ if (msg->epoch == ring_seqnum) { ring->seqnum++; /* next expected sequence number */ goto dma_completed; } } if (tries > prot->d2h_sync_wait_max) prot->d2h_sync_wait_max = tries; OSL_CACHE_INV(msg, msglen); /* invalidate and try again */ } /* for PCIE_D2H_SYNC_WAIT_TRIES */ dhd_prot_d2h_sync_livelock(dhd, ring->seqnum, tries, (uchar *)msg, msglen); ring->seqnum++; /* skip this message ... 
leak of a pktid */
	return 0; /* invalid msgtype 0 -> noop callback */

dma_completed:
	prot->d2h_sync_wait_tot += tries;
	return msg->msg_type;
}

/*
 * Do not sync on a D2H DMA.
 * Bound when the dongle advertises neither SEQNUM nor XORCSUM mode (see
 * dhd_prot_d2h_sync_init() below); the message header is trusted as-is.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
	volatile cmn_msg_hdr_t *msg, int msglen)
{
	return msg->msg_type;
}

/*
 * Initialize the D2H DMA Sync mode, per D2H ring seqnum and dhd stats.
 * Resets the livelock wait counters, seeds each D2H completion ring's
 * expected epoch/sequence number, and binds the sync callback matching the
 * mode advertised by the dongle in dhd->d2h_sync_mode.
 */
static void
dhd_prot_d2h_sync_init(dhd_pub_t *dhd, dhd_prot_t * prot)
{
	prot->d2h_sync_wait_max = 0UL;
	prot->d2h_sync_wait_tot = 0UL;

	prot->d2hring_tx_cpln->seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_rx_cpln->seqnum = D2H_EPOCH_INIT_VAL;
	prot->d2hring_ctrl_cpln->seqnum = D2H_EPOCH_INIT_VAL;

	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
		prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
		prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
	else
		prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
}

#endif /* PCIE_D2H_SYNC */

/*
 * +---------------------------------------------------------------------------+
 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
 * The packet id map, also includes storage for some packet parameters that
 * may be saved. A native packet pointer along with the parameters may be saved
 * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
 * and the metadata may be retrieved using the previously allocated packet id.
 * +---------------------------------------------------------------------------+
 */

/*
 * Uses a FIFO dll with Nx more pktids instead of a LIFO stack.
 * If you wish to enable pktidaudit in firmware with FIFO PktId allocator, then
 * the total number of PktIds managed by the pktidaudit must be multiplied by
 * this DHD_PKTIDMAP_FIFO factor. 
*/ #define DHD_PKTIDMAP_FIFO 4 #define MAX_PKTID_ITEMS (8192) /* Maximum number of pktids supported */ typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */ /* Construct a packet id mapping table, returing an opaque map handle */ static dhd_pktid_map_handle_t *dhd_pktid_map_init(void *osh, uint32 num_items); /* Destroy a packet id mapping table, freeing all packets active in the table */ static void dhd_pktid_map_fini(dhd_pktid_map_handle_t *map); /* Determine number of pktids that are available */ static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *map); /* Allocate a unique pktid against which a pkt and some metadata is saved */ static INLINE uint32 dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle, void *pkt); static INLINE void dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey, dmaaddr_t physaddr, uint32 len, uint8 dma, uint8 buf_type); static uint32 dhd_pktid_map_alloc(dhd_pktid_map_handle_t *map, void *pkt, dmaaddr_t physaddr, uint32 len, uint8 dma, uint8 buf_type); /* Return an allocated pktid, retrieving previously saved pkt and metadata */ static void *dhd_pktid_map_free(dhd_pktid_map_handle_t *map, uint32 id, dmaaddr_t *physaddr, uint32 *len, uint8 buf_type); /* Packet metadata saved in packet id mapper */ typedef enum pkt_buf_type { BUFF_TYPE_DATA_TX = 0, BUFF_TYPE_DATA_RX, BUFF_TYPE_IOCTL_RX, BUFF_TYPE_EVENT_RX, BUFF_TYPE_NO_CHECK } pkt_buf_type_t; typedef struct dhd_pktid_item { #if defined(DHD_PKTIDMAP_FIFO) dll_t list_node; /* MUST BE FIRST field */ uint32 nkey; #endif bool inuse; /* tag an item to be in use */ uint8 dma; /* map direction: flush or invalidate */ uint8 buf_type; uint16 len; /* length of mapped packet's buffer */ void *pkt; /* opaque native pointer to a packet */ dmaaddr_t physaddr; /* physical address of mapped packet's buffer */ } dhd_pktid_item_t; typedef struct dhd_pktid_map { void *osh; int items; /* total items in map */ int avail; /* total available items */ int 
failures; /* lockers unavailable count */ /* Unique PktId Allocator: FIFO dll, or LIFO:stack of keys */ #if defined(DHD_PKTIDMAP_FIFO) dll_t list_free; /* allocate from head, free to tail */ dll_t list_inuse; #else /* ! DHD_PKTIDMAP_FIFO */ uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */ #endif /* ! DHD_PKTIDMAP_FIFO */ dhd_pktid_item_t lockers[0]; /* metadata storage */ } dhd_pktid_map_t; /* * PktId (Locker) #0 is never allocated and is considered invalid. * * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a * depleted pktid pool and must not be used by the caller. * * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. */ #define DHD_PKTID_INVALID (0U) #define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) #if defined(DHD_PKTIDMAP_FIFO) /* A 4x pool of pktids are managed with FIFO allocation. */ #define DHD_PKIDMAP_ITEMS(items) (items * DHD_PKTIDMAP_FIFO) #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ (DHD_PKTID_ITEM_SZ * ((DHD_PKTIDMAP_FIFO * (items)) + 1))) #else /* ! DHD_PKTIDMAP_FIFO */ #define DHD_PKIDMAP_ITEMS(items) (items) #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ (DHD_PKTID_ITEM_SZ * ((items) + 1))) #endif /* ! 
DHD_PKTIDMAP_FIFO */ #define NATIVE_TO_PKTID_INIT(osh, items) dhd_pktid_map_init((osh), (items)) #define NATIVE_TO_PKTID_FINI(map) dhd_pktid_map_fini(map) #define NATIVE_TO_PKTID_CLEAR(map) dhd_pktid_map_clear(map) #define NATIVE_TO_PKTID_RSV(map, pkt) dhd_pktid_map_reserve((map), (pkt)) #define NATIVE_TO_PKTID_SAVE(map, pkt, nkey, pa, len, dma, buf_type) \ dhd_pktid_map_save((map), (void *)(pkt), (nkey), (pa), (uint32)(len), \ (uint8)dma, (uint8)buf_type) #define NATIVE_TO_PKTID(map, pkt, pa, len, dma, buf_type) \ dhd_pktid_map_alloc((map), (void *)(pkt), (pa), (uint32)(len), \ (uint8)dma, (uint8)buf_type) #define PKTID_TO_NATIVE(map, pktid, pa, len, buf_type) \ dhd_pktid_map_free((map), (uint32)(pktid), \ (dmaaddr_t *)&(pa), (uint32 *)&(len), (uint8)buf_type) #define PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) /* * +---------------------------------------------------------------------------+ * Packet to Packet Id mapper using a <numbered_key, locker> paradigm. * * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS]. * * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique * packet id is returned. This unique packet id may be used to retrieve the * previously saved packet metadata, using dhd_pktid_map_free(). On invocation * of dhd_pktid_map_free(), the unique packet id is essentially freed. A * subsequent call to dhd_pktid_map_alloc() may reuse this packet id. * * Implementation Note: * Convert this into a <key,locker> abstraction and place into bcmutils ! * Locker abstraction should treat contents as opaque storage, and a * callback should be registered to handle inuse lockers on destructor. 
* * +---------------------------------------------------------------------------+ */ /* Allocate and initialize a mapper of num_items <numbered_key, locker> */ static dhd_pktid_map_handle_t * dhd_pktid_map_init(void *osh, uint32 num_items) { uint32 nkey; dhd_pktid_map_t *map; uint32 dhd_pktid_map_sz; uint32 map_items; ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS)); dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items); if ((map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz)) == NULL) { DHD_ERROR(("%s:%d: MALLOC failed for size %d\n", __FUNCTION__, __LINE__, dhd_pktid_map_sz)); return NULL; } bzero(map, dhd_pktid_map_sz); map->osh = osh; map->items = num_items; map->avail = num_items; map_items = DHD_PKIDMAP_ITEMS(map->items); #if defined(DHD_PKTIDMAP_FIFO) /* Initialize all dll */ dll_init(&map->list_free); dll_init(&map->list_inuse); /* Initialize and place all 4 x items in map's list_free */ for (nkey = 0; nkey <= map_items; nkey++) { dll_init(&map->lockers[nkey].list_node); map->lockers[nkey].inuse = FALSE; map->lockers[nkey].nkey = nkey; map->lockers[nkey].pkt = NULL; /* bzero: redundant */ map->lockers[nkey].len = 0; /* Free at tail */ dll_append(&map->list_free, &map->lockers[nkey].list_node); } /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */ map->lockers[DHD_PKTID_INVALID].inuse = TRUE; dll_delete(&map->lockers[DHD_PKTID_INVALID].list_node); dll_append(&map->list_inuse, &map->lockers[DHD_PKTID_INVALID].list_node); #else /* ! DHD_PKTIDMAP_FIFO */ map->lockers[DHD_PKTID_INVALID].inuse = TRUE; /* tag locker #0 as inuse */ for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */ map->keys[nkey] = nkey; /* populate with unique keys */ map->lockers[nkey].inuse = FALSE; map->lockers[nkey].pkt = NULL; /* bzero: redundant */ map->lockers[nkey].len = 0; } #endif /* ! DHD_PKTIDMAP_FIFO */ return (dhd_pktid_map_handle_t *)map; /* opaque handle */ } /* * Retrieve all allocated keys and free all <numbered_key, locker>. 
* Freeing implies: unmapping the buffers and freeing the native packet * This could have been a callback registered with the pktid mapper. */ static void dhd_pktid_map_fini(dhd_pktid_map_handle_t *handle) { void *osh; int nkey; dhd_pktid_map_t *map; uint32 dhd_pktid_map_sz; dhd_pktid_item_t *locker; uint32 map_items; if (handle == NULL) return; map = (dhd_pktid_map_t *)handle; osh = map->osh; dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); nkey = 1; /* skip reserved KEY #0, and start from 1 */ locker = &map->lockers[nkey]; map_items = DHD_PKIDMAP_ITEMS(map->items); for (; nkey <= map_items; nkey++, locker++) { if (locker->inuse == TRUE) { /* numbered key still in use */ locker->inuse = FALSE; /* force open the locker */ { /* This could be a callback registered with dhd_pktid_map */ DMA_UNMAP(osh, locker->physaddr, locker->len, locker->dma, 0, 0); #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_IOCTLBUF) PKTFREE_STATIC(osh, (ulong*)locker->pkt, FALSE); #else PKTFREE(osh, (ulong*)locker->pkt, FALSE); #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_EVENTBUF */ } } locker->pkt = NULL; /* clear saved pkt */ locker->len = 0; } MFREE(osh, handle, dhd_pktid_map_sz); } static void dhd_pktid_map_clear(dhd_pktid_map_handle_t *handle) { void *osh; int nkey; dhd_pktid_map_t *map; dhd_pktid_item_t *locker; uint32 map_items; DHD_TRACE(("%s\n", __FUNCTION__)); if (handle == NULL) return; map = (dhd_pktid_map_t *)handle; osh = map->osh; map->failures = 0; nkey = 1; /* skip reserved KEY #0, and start from 1 */ locker = &map->lockers[nkey]; map_items = DHD_PKIDMAP_ITEMS(map->items); for (; nkey <= map_items; nkey++, locker++) { #if !defined(DHD_PKTIDMAP_FIFO) map->keys[nkey] = nkey; /* populate with unique keys */ #endif /* ! 
DHD_PKTIDMAP_FIFO */ if (locker->inuse == TRUE) { /* numbered key still in use */ #if defined(DHD_PKTIDMAP_FIFO) ASSERT(locker->nkey == nkey); dll_delete(&locker->list_node); dll_append(&map->list_free, &locker->list_node); #endif /* DHD_PKTIDMAP_FIFO */ locker->inuse = FALSE; /* force open the locker */ DHD_TRACE(("%s free id%d\n", __FUNCTION__, nkey)); DMA_UNMAP(osh, (uint32)locker->physaddr, locker->len, locker->dma, 0, 0); #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_IOCTLBUF) PKTFREE_STATIC(osh, (ulong*)locker->pkt, FALSE); #else PKTFREE(osh, (ulong*)locker->pkt, FALSE); #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_EVENTBUF */ } locker->pkt = NULL; /* clear saved pkt */ locker->len = 0; } map->avail = map->items; } /* Get the pktid free count */ static INLINE uint32 BCMFASTPATH dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle) { dhd_pktid_map_t *map; ASSERT(handle != NULL); map = (dhd_pktid_map_t *)handle; return map->avail; } /* * Allocate locker, save pkt contents, and return the locker's numbered key. * dhd_pktid_map_alloc() is not reentrant, and is the caller's responsibility. * Caller must treat a returned value DHD_PKTID_INVALID as a failure case, * implying a depleted pool of pktids. */ static INLINE uint32 dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle, void *pkt) { uint32 nkey; dhd_pktid_map_t *map; dhd_pktid_item_t *locker; ASSERT(handle != NULL); map = (dhd_pktid_map_t *)handle; if (map->avail <= 0) { /* no more pktids to allocate */ map->failures++; DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__)); return DHD_PKTID_INVALID; /* failed alloc request */ } ASSERT(map->avail <= map->items); #if defined(DHD_PKTIDMAP_FIFO) ASSERT(!dll_empty(&map->list_free)); /* Move list_free head item to inuse list, fetch key in head node */ locker = (dhd_pktid_item_t *)dll_head_p(&map->list_free); dll_delete(&locker->list_node); nkey = locker->nkey; dll_append(&map->list_inuse, &locker->list_node); #else /* ! 
DHD_PKTIDMAP_FIFO */
	nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
	locker = &map->lockers[nkey]; /* save packet metadata in locker */
#endif /* ! DHD_PKTIDMAP_FIFO */

	map->avail--;

	locker->inuse = TRUE; /* reserve this locker */
	locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
	locker->len = 0;
	ASSERT(nkey != DHD_PKTID_INVALID);

	return nkey; /* return locker's numbered key */
}

/*
 * Record a packet's DMA metadata into a locker previously reserved via
 * dhd_pktid_map_reserve(). The locker must already be inuse and hold the
 * same pkt pointer (asserted below); only the DMA direction, buffer type,
 * physical address and (16-bit truncated) length are stored here.
 */
static INLINE void
dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey,
	dmaaddr_t physaddr, uint32 len, uint8 dma, uint8 buf_type)
{
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;

	ASSERT(handle != NULL);
	map = (dhd_pktid_map_t *)handle;

	ASSERT((nkey != DHD_PKTID_INVALID) &&
	       (nkey <= DHD_PKIDMAP_ITEMS(map->items)));

	locker = &map->lockers[nkey];
	ASSERT((locker->pkt == pkt) && (locker->inuse == TRUE));

	locker->dma = dma; /* store contents in locker */
	locker->buf_type = buf_type;
	locker->physaddr = physaddr;
	locker->len = (uint16)len; /* 16bit len */
}

/*
 * One-shot reserve + save: allocate a pktid for pkt and immediately record
 * its DMA metadata. Returns DHD_PKTID_INVALID when the pool is depleted
 * (in that case nothing is saved). Not reentrant; caller serializes.
 */
static uint32 BCMFASTPATH
dhd_pktid_map_alloc(dhd_pktid_map_handle_t *handle, void *pkt,
	dmaaddr_t physaddr, uint32 len, uint8 dma, uint8 buf_type)
{
	uint32 nkey = dhd_pktid_map_reserve(handle, pkt);
	if (nkey != DHD_PKTID_INVALID) {
		dhd_pktid_map_save(handle, pkt, nkey, physaddr, len,
			dma, buf_type);
	}
	return nkey;
}

/*
 * Given a numbered key, return the locker contents.
 * dhd_pktid_map_free() is not reentrant, and is the caller's responsibility.
 * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
 * value. Only a previously allocated pktid may be freed. 
*/
static void * BCMFASTPATH
dhd_pktid_map_free(dhd_pktid_map_handle_t *handle, uint32 nkey,
	dmaaddr_t *physaddr, uint32 *len, uint8 buf_type)
{
	dhd_pktid_map_t *map;
	dhd_pktid_item_t *locker;
	void * pkt;

	ASSERT(handle != NULL);
	map = (dhd_pktid_map_t *)handle;

	ASSERT((nkey != DHD_PKTID_INVALID) &&
	       (nkey <= DHD_PKIDMAP_ITEMS(map->items)));

	locker = &map->lockers[nkey];

	if (locker->inuse == FALSE) { /* Debug check for cloned numbered key */
		DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
		           __FUNCTION__, __LINE__, nkey));
		ASSERT(locker->inuse != FALSE);
		return NULL;
	}

	/* Unless the caller opted out with BUFF_TYPE_NO_CHECK, the expected
	 * buffer type must match what was recorded at alloc/save time.
	 */
	if ((buf_type != BUFF_TYPE_NO_CHECK) &&
	    (locker->buf_type != buf_type)) {
		DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
		           __FUNCTION__, __LINE__, nkey));
		ASSERT(locker->buf_type == buf_type);
		return NULL;
	}

	map->avail++;

#if defined(DHD_PKTIDMAP_FIFO)
	ASSERT(locker->nkey == nkey);
	dll_delete(&locker->list_node);
	/* Free locker to "tail" of free list */
	dll_append(&map->list_free, &locker->list_node);
#else /* ! DHD_PKTIDMAP_FIFO */
	map->keys[map->avail] = nkey; /* make this numbered key available */
#endif /* ! DHD_PKTIDMAP_FIFO */

	locker->inuse = FALSE; /* open and free Locker */
	*physaddr = locker->physaddr; /* return contents of locker */
	*len = (uint32)locker->len;

	pkt = locker->pkt;
	locker->pkt = NULL; /* Clear pkt */
	locker->len = 0;

	return pkt;
}

/* Linkage, sets prot link and updates hdrlen in pub */
int dhd_prot_attach(dhd_pub_t *dhd)
{
	uint alloced = 0;
	dhd_prot_t *prot;

	/* Allocate prot structure */
	if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
		sizeof(dhd_prot_t)))) {
		DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
		goto fail;
	}
	memset(prot, 0, sizeof(*prot));

	prot->osh = dhd->osh;
	dhd->prot = prot;

	/* DMAing ring completes supported? 
FALSE by default */ dhd->dma_d2h_ring_upd_support = FALSE; dhd->dma_h2d_ring_upd_support = FALSE; /* Ring Allocations */ /* 1.0 H2D TXPOST ring */ if (!(prot->h2dring_txp_subn = prot_ring_attach(prot, "h2dtxp", H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE, BCMPCIE_H2D_TXFLOWRINGID))) { DHD_ERROR(("%s: kmalloc for H2D TXPOST ring failed\n", __FUNCTION__)); goto fail; } /* 2.0 H2D RXPOST ring */ if (!(prot->h2dring_rxp_subn = prot_ring_attach(prot, "h2drxp", H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE, BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT))) { DHD_ERROR(("%s: kmalloc for H2D RXPOST ring failed\n", __FUNCTION__)); goto fail; } /* 3.0 H2D CTRL_SUBMISSION ring */ if (!(prot->h2dring_ctrl_subn = prot_ring_attach(prot, "h2dctrl", H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE, BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT))) { DHD_ERROR(("%s: kmalloc for H2D CTRL_SUBMISSION ring failed\n", __FUNCTION__)); goto fail; } /* 4.0 D2H TX_COMPLETION ring */ if (!(prot->d2hring_tx_cpln = prot_ring_attach(prot, "d2htxcpl", D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE, BCMPCIE_D2H_MSGRING_TX_COMPLETE))) { DHD_ERROR(("%s: kmalloc for D2H TX_COMPLETION ring failed\n", __FUNCTION__)); goto fail; } /* 5.0 D2H RX_COMPLETION ring */ if (!(prot->d2hring_rx_cpln = prot_ring_attach(prot, "d2hrxcpl", D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE, BCMPCIE_D2H_MSGRING_RX_COMPLETE))) { DHD_ERROR(("%s: kmalloc for D2H RX_COMPLETION ring failed\n", __FUNCTION__)); goto fail; } /* 6.0 D2H CTRL_COMPLETION ring */ if (!(prot->d2hring_ctrl_cpln = prot_ring_attach(prot, "d2hctrl", D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE, BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE))) { DHD_ERROR(("%s: kmalloc for D2H CTRL_COMPLETION ring failed\n", __FUNCTION__)); goto fail; } /* Return buffer for ioctl */ prot->retbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN, &alloced, &prot->retbuf.pa, &prot->retbuf.dmah); if (prot->retbuf.va == NULL) { ASSERT(0); return BCME_NOMEM; } 
ASSERT(MODX((unsigned long)prot->retbuf.va, DMA_ALIGN_LEN) == 0); bzero(prot->retbuf.va, IOCT_RETBUF_SIZE); OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE); /* IOCTL request buffer */ prot->ioctbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN, &alloced, &prot->ioctbuf.pa, &prot->ioctbuf.dmah); if (prot->ioctbuf.va == NULL) { ASSERT(0); return BCME_NOMEM; } ASSERT(MODX((unsigned long)prot->ioctbuf.va, DMA_ALIGN_LEN) == 0); bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE); OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE); /* Scratch buffer for dma rx offset */ prot->d2h_dma_scratch_buf_len = DMA_D2H_SCRATCH_BUF_LEN; prot->d2h_dma_scratch_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, DMA_D2H_SCRATCH_BUF_LEN, DMA_ALIGN_LEN, &alloced, &prot->d2h_dma_scratch_buf.pa, &prot->d2h_dma_scratch_buf.dmah); if (prot->d2h_dma_scratch_buf.va == NULL) { ASSERT(0); return BCME_NOMEM; } ASSERT(MODX((unsigned long)prot->d2h_dma_scratch_buf.va, DMA_ALIGN_LEN) == 0); bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); /* PKTID handle INIT */ prot->pktid_map_handle = NATIVE_TO_PKTID_INIT(dhd->osh, MAX_PKTID_ITEMS); if (prot->pktid_map_handle == NULL) { ASSERT(0); return BCME_NOMEM; } prot->dmaxfer.srcmem.va = NULL; prot->dmaxfer.destmem.va = NULL; prot->dmaxfer_in_progress = FALSE; prot->rx_metadata_offset = 0; prot->tx_metadata_offset = 0; #ifdef DHD_RX_CHAINING dhd_rxchain_reset(&prot->rxchain); #endif return 0; fail: #ifndef CONFIG_DHD_USE_STATIC_BUF if (prot != NULL) dhd_prot_detach(dhd); #endif /* CONFIG_DHD_USE_STATIC_BUF */ return BCME_NOMEM; } /* Init memory block on host DMA'ing indices */ int dhd_prot_init_index_dma_block(dhd_pub_t *dhd, uint8 type, uint32 length) { uint alloced = 0; dhd_prot_t *prot = dhd->prot; uint32 dma_block_size = 4 * length; if (prot == NULL) { DHD_ERROR(("prot is not inited\n")); return BCME_ERROR; } switch (type) { case 
HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
		/* ring update dma buffer for submission write */
		prot->h2d_dma_writeindx_buf_len = dma_block_size;
		prot->h2d_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
			dma_block_size, DMA_ALIGN_LEN, &alloced,
			&prot->h2d_dma_writeindx_buf.pa,
			&prot->h2d_dma_writeindx_buf.dmah);
		if (prot->h2d_dma_writeindx_buf.va == NULL) {
			return BCME_NOMEM;
		}
		/* dongle DMAs 4-byte indices into this block; must be 4-aligned */
		ASSERT(ISALIGNED(prot->h2d_dma_writeindx_buf.va, 4));
		bzero(prot->h2d_dma_writeindx_buf.va, dma_block_size);
		OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, dma_block_size);
		DHD_ERROR(("H2D_WRITEINDX_ARRAY_HOST: %d-bytes "
			"inited for dma'ing h2d-w indices\n",
			prot->h2d_dma_writeindx_buf_len));
		break;

	case HOST_TO_DNGL_DMA_READINDX_BUFFER:
		/* ring update dma buffer for submission read */
		prot->h2d_dma_readindx_buf_len = dma_block_size;
		prot->h2d_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
			dma_block_size, DMA_ALIGN_LEN, &alloced,
			&prot->h2d_dma_readindx_buf.pa,
			&prot->h2d_dma_readindx_buf.dmah);
		if (prot->h2d_dma_readindx_buf.va == NULL) {
			return BCME_NOMEM;
		}
		ASSERT(ISALIGNED(prot->h2d_dma_readindx_buf.va, 4));
		bzero(prot->h2d_dma_readindx_buf.va, dma_block_size);
		OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, dma_block_size);
		DHD_ERROR(("H2D_READINDX_ARRAY_HOST %d-bytes "
			"inited for dma'ing h2d-r indices\n",
			prot->h2d_dma_readindx_buf_len));
		break;

	case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
		/* ring update dma buffer for completion write */
		prot->d2h_dma_writeindx_buf_len = dma_block_size;
		prot->d2h_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
			dma_block_size, DMA_ALIGN_LEN, &alloced,
			&prot->d2h_dma_writeindx_buf.pa,
			&prot->d2h_dma_writeindx_buf.dmah);
		if (prot->d2h_dma_writeindx_buf.va == NULL) {
			return BCME_NOMEM;
		}
		ASSERT(ISALIGNED(prot->d2h_dma_writeindx_buf.va, 4));
		bzero(prot->d2h_dma_writeindx_buf.va, dma_block_size);
		OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, dma_block_size);
		DHD_ERROR(("D2H_WRITEINDX_ARRAY_HOST %d-bytes "
			"inited for dma'ing d2h-w indices\n",
			prot->d2h_dma_writeindx_buf_len));
		break;

	case DNGL_TO_HOST_DMA_READINDX_BUFFER:
		/* ring update dma buffer for completion read */
		prot->d2h_dma_readindx_buf_len = dma_block_size;
		prot->d2h_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
			dma_block_size, DMA_ALIGN_LEN, &alloced,
			&prot->d2h_dma_readindx_buf.pa,
			&prot->d2h_dma_readindx_buf.dmah);
		if (prot->d2h_dma_readindx_buf.va == NULL) {
			return BCME_NOMEM;
		}
		ASSERT(ISALIGNED(prot->d2h_dma_readindx_buf.va, 4));
		bzero(prot->d2h_dma_readindx_buf.va, dma_block_size);
		OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, dma_block_size);
		DHD_ERROR(("D2H_READINDX_ARRAY_HOST %d-bytes "
			"inited for dma'ing d2h-r indices\n",
			prot->d2h_dma_readindx_buf_len));
		break;

	default:
		DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
		return BCME_BADOPTION;
	}
	return BCME_OK;
}

/* Unlink, frees allocated protocol memory (including dhd_prot) */
void dhd_prot_detach(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	/* Stop the protocol module */
	if (dhd->prot) {
		/* free up scratch buffer */
		if (prot->d2h_dma_scratch_buf.va) {
			DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_scratch_buf.va,
				DMA_D2H_SCRATCH_BUF_LEN, prot->d2h_dma_scratch_buf.pa,
				prot->d2h_dma_scratch_buf.dmah);
			prot->d2h_dma_scratch_buf.va = NULL;
		}
		/* free up ring upd buffer for submission writes */
		if (prot->h2d_dma_writeindx_buf.va) {
			DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_writeindx_buf.va,
				prot->h2d_dma_writeindx_buf_len,
				prot->h2d_dma_writeindx_buf.pa,
				prot->h2d_dma_writeindx_buf.dmah);
			prot->h2d_dma_writeindx_buf.va = NULL;
		}
		/* free up ring upd buffer for submission reads */
		if (prot->h2d_dma_readindx_buf.va) {
			DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_readindx_buf.va,
				prot->h2d_dma_readindx_buf_len,
				prot->h2d_dma_readindx_buf.pa,
				prot->h2d_dma_readindx_buf.dmah);
			prot->h2d_dma_readindx_buf.va = NULL;
		}
		/* free up ring upd buffer for completion writes */
		if (prot->d2h_dma_writeindx_buf.va) {
			DMA_FREE_CONSISTENT(dhd->osh,
				prot->d2h_dma_writeindx_buf.va,
				prot->d2h_dma_writeindx_buf_len,
				prot->d2h_dma_writeindx_buf.pa,
				prot->d2h_dma_writeindx_buf.dmah);
			prot->d2h_dma_writeindx_buf.va = NULL;
		}
		/* free up ring upd buffer for completion reads */
		if (prot->d2h_dma_readindx_buf.va) {
			DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_readindx_buf.va,
				prot->d2h_dma_readindx_buf_len,
				prot->d2h_dma_readindx_buf.pa,
				prot->d2h_dma_readindx_buf.dmah);
			prot->d2h_dma_readindx_buf.va = NULL;
		}
		/* ioctl return buffer */
		if (prot->retbuf.va) {
			DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->retbuf.va,
				IOCT_RETBUF_SIZE, dhd->prot->retbuf.pa,
				dhd->prot->retbuf.dmah);
			dhd->prot->retbuf.va = NULL;
		}
		/* ioctl request buffer */
		if (prot->ioctbuf.va) {
			DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->ioctbuf.va,
				IOCT_RETBUF_SIZE, dhd->prot->ioctbuf.pa,
				dhd->prot->ioctbuf.dmah);
			dhd->prot->ioctbuf.va = NULL;
		}
		/* 1.0 H2D TXPOST ring */
		dhd_prot_ring_detach(dhd, prot->h2dring_txp_subn);
		/* 2.0 H2D RXPOST ring */
		dhd_prot_ring_detach(dhd, prot->h2dring_rxp_subn);
		/* 3.0 H2D CTRL_SUBMISSION ring */
		dhd_prot_ring_detach(dhd, prot->h2dring_ctrl_subn);
		/* 4.0 D2H TX_COMPLETION ring */
		dhd_prot_ring_detach(dhd, prot->d2hring_tx_cpln);
		/* 5.0 D2H RX_COMPLETION ring */
		dhd_prot_ring_detach(dhd, prot->d2hring_rx_cpln);
		/* 6.0 D2H CTRL_COMPLETION ring */
		dhd_prot_ring_detach(dhd, prot->d2hring_ctrl_cpln);

		NATIVE_TO_PKTID_FINI(dhd->prot->pktid_map_handle);

#ifndef CONFIG_DHD_USE_STATIC_BUF
		MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
#endif /* CONFIG_DHD_USE_STATIC_BUF */

		dhd->prot = NULL;
	}
}

/* Record the dongle-advertised rx data offset applied to received frames. */
void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
{
	dhd_prot_t *prot = dhd->prot;
	prot->rx_dataoffset = rx_offset;
}

/* Initialize protocol: sync w/dongle state.
 * Sets dongle media info (iswl, drv_version, mac address).
 */
int dhd_sync_with_dongle(dhd_pub_t *dhd)
{
	int ret = 0;
	wlc_rev_info_t revinfo;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Post event buffer after shim layer is attached */
	ret = dhd_msgbuf_rxbuf_post_event_bufs(dhd);
	if (ret <= 0) {
		DHD_ERROR(("%s : Post event buffer fail. ret = %d\n",
			__FUNCTION__, ret));
		return ret;
	}
	dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
#ifdef CUSTOMER_HW4
	/* Check the memdump capability */
	dhd_get_memdump_info(dhd);
#endif /* CUSTOMER_HW4 */

	/* Get the device rev info */
	memset(&revinfo, 0, sizeof(revinfo));
	ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo,
		sizeof(revinfo), FALSE, 0);
	if (ret < 0)
		goto done;

	dhd_process_cid_mac(dhd, TRUE);
	ret = dhd_preinit_ioctls(dhd);
	if (!ret)
		dhd_process_cid_mac(dhd, FALSE);

	/* Always assumes wl for now */
	dhd->iswl = TRUE;
done:
	return ret;
}

/* This function does all necessary initialization needed
 * for IOCTL/IOVAR path
 */
int dhd_prot_init(dhd_pub_t *dhd)
{
	int ret = 0;
	dhd_prot_t *prot = dhd->prot;

	/* Max pkts in ring */
	prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;

	DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__,
		prot->max_tx_count));

	/* Read max rx packets supported by dongle */
	dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
	if (prot->max_rxbufpost == 0) {
		/* This would happen if the dongle firmware is not */
		/* using the latest shared structure template */
		prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
	}
	DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__,
		prot->max_rxbufpost));

	prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
	prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;

	prot->active_tx_count = 0;
	prot->data_seq_no = 0;
	prot->ioctl_seq_no = 0;
	prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
	prot->ioctl_trans_id = 1;
	mutex_init(&prot->ioctl_mutex);

	/* Register the interrupt function upfront */
	/* remove corerev checks in data path */
	prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
	/* Initialise rings */

	/* 1.0 H2D TXPOST ring */
	if (dhd_bus_is_txmode_push(dhd->bus)) {
		dhd_ring_init(dhd, prot->h2dring_txp_subn);
	}
	/* 2.0 H2D RXPOST ring */
	dhd_ring_init(dhd, prot->h2dring_rxp_subn);
	/* 3.0 H2D CTRL_SUBMISSION ring */
	dhd_ring_init(dhd, prot->h2dring_ctrl_subn);
	/* 4.0 D2H TX_COMPLETION ring */
	dhd_ring_init(dhd, prot->d2hring_tx_cpln);
	/* 5.0 D2H RX_COMPLETION ring */
	dhd_ring_init(dhd, prot->d2hring_rx_cpln);
	/* 6.0 D2H CTRL_COMPLETION ring */
	dhd_ring_init(dhd, prot->d2hring_ctrl_cpln);

#if defined(PCIE_D2H_SYNC)
	dhd_prot_d2h_sync_init(dhd, prot);
#endif /* PCIE_D2H_SYNC */

	/* init the scratch buffer */
	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.pa,
		sizeof(prot->d2h_dma_scratch_buf.pa),
		DNGL_TO_HOST_DMA_SCRATCH_BUFFER, 0);
	dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf_len,
		sizeof(prot->d2h_dma_scratch_buf_len),
		DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN, 0);

	/* If supported by the host, indicate the memory block
	 * for comletion writes / submission reads to shared space
	 */
	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
		dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_writeindx_buf.pa,
			sizeof(prot->d2h_dma_writeindx_buf.pa),
			DNGL_TO_HOST_DMA_WRITEINDX_BUFFER, 0);
		dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_readindx_buf.pa,
			sizeof(prot->h2d_dma_readindx_buf.pa),
			HOST_TO_DNGL_DMA_READINDX_BUFFER, 0);
	}
	if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
		dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_writeindx_buf.pa,
			sizeof(prot->h2d_dma_writeindx_buf.pa),
			HOST_TO_DNGL_DMA_WRITEINDX_BUFFER, 0);
		dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_readindx_buf.pa,
			sizeof(prot->d2h_dma_readindx_buf.pa),
			DNGL_TO_HOST_DMA_READINDX_BUFFER, 0);
	}

	/* NOTE(review): the return of the data-rxbuf post is overwritten by the
	 * ioctl-resp post below, so a data post failure is silently ignored -
	 * confirm whether that is intentional.
	 */
	ret = dhd_msgbuf_rxbuf_post(dhd);
	ret = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);

	return ret;
}

#define DHD_DBG_SHOW_METADATA 0
#if DHD_DBG_SHOW_METADATA
/* Debug-only: walk and hexdump the TLVs in a D2H metadata blob. */
static void BCMFASTPATH
dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
{
	uint8 tlv_t;
	uint8 tlv_l;
	uint8 *tlv_v = (uint8 *)ptr;

	if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
		return;

	len -= BCMPCIE_D2H_METADATA_HDRLEN;
	tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;

	while (len > TLV_HDR_LEN) {
		tlv_t = tlv_v[TLV_TAG_OFF];
		tlv_l = tlv_v[TLV_LEN_OFF];

		len -= TLV_HDR_LEN;
		tlv_v += TLV_HDR_LEN;
		if (len < tlv_l)
			break;
		if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
			break;

		switch (tlv_t) {
		case WLFC_CTL_TYPE_TXSTATUS:
			bcm_print_bytes("METADATA TX_STATUS", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_RSSI:
			bcm_print_bytes("METADATA RX_RSSI", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_FIFO_CREDITBACK:
			bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
			bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_RX_STAMP:
			bcm_print_bytes("METADATA RX_TIMESTAMP", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_TRANS_ID:
			bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
			break;
		case WLFC_CTL_TYPE_COMP_TXSTATUS:
			bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
			break;
		default:
			bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
			break;
		}

		len -= tlv_l;
		tlv_v += tlv_l;
	}
}
#endif /* DHD_DBG_SHOW_METADATA */

/* Release a pktid: unmap its DMA mapping (TX direction) and free the pkt. */
static INLINE void BCMFASTPATH
dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type)
{
	void *PKTBUF;
	dmaaddr_t pa;
	uint32 pa_len;
	PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa,
		pa_len, buf_type);
	if (PKTBUF) {
		DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0);
		PKTFREE(dhd->osh, PKTBUF, FALSE);
	}
	return;
}

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_IOCTLBUF)
/* Same as dhd_prot_packet_free() but returns the pkt to the static pool. */
static INLINE void BCMFASTPATH
dhd_prot_static_packet_free(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type)
{
	void *PKTBUF;
	dmaaddr_t pa;
	uint32 pa_len;
	PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa,
		pa_len, buf_type);
	if (PKTBUF) {
		DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0);
		PKTFREE_STATIC(dhd->osh, PKTBUF, FALSE);
	}
	return;
}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_IOCTLBUF */

static
INLINE void * BCMFASTPATH
dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 buf_type)
{
	void *PKTBUF;
	dmaaddr_t pa;
	uint32 pa_len;
	/* Look up the native pkt for 'pktid' and unmap its RX DMA mapping;
	 * ownership of the pkt transfers to the caller.
	 */
	PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa,
		pa_len, buf_type);
	if (PKTBUF) {
		DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0);
	}
	return PKTBUF;
}

/* Top up the H2D RXPOST ring until max_rxbufpost buffers are outstanding.
 * Bounded by a retry budget so it cannot spin when the ring has no space.
 */
static int BCMFASTPATH
dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	int16 fillbufs;
	uint16 cnt = 64; /* retry budget */
	int retcount = 0;

	fillbufs = prot->max_rxbufpost - prot->rxbufpost;
	while (fillbufs > 0) {
		cnt--;
		if (cnt == 0) {
			/* find a better way to reschedule rx buf post if space not available */
			DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
			DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
			break;
		}

		/* Post in a burst of 8 buffers ata time */
		fillbufs = MIN(fillbufs, RX_BUF_BURST);

		/* Post buffers */
		retcount = dhd_prot_rxbufpost(dhd, fillbufs);

		if (retcount > 0) {
			prot->rxbufpost += (uint16)retcount;
			/* how many more to post */
			fillbufs = prot->max_rxbufpost - prot->rxbufpost;
		} else {
			/* Make sure we don't run loop any further */
			fillbufs = 0;
		}
	}
	return 0;
}

/* Post count no of rx buffers down to dongle */
static int BCMFASTPATH
dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count)
{
	void *p;
	uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
	uint8 *rxbuf_post_tmp;
	host_rxbuf_post_t *rxbuf_post;
	void* msg_start;
	dmaaddr_t physaddr;
	uint32 pktlen;
	dhd_prot_t *prot = dhd->prot;
	msgbuf_ring_t * ring = prot->h2dring_rxp_subn;
	uint8 i = 0;
	uint16 alloced = 0;
	unsigned long flags;

	DHD_GENERAL_LOCK(dhd, flags);
	/* Claim space for 'count' no of messages */
	msg_start = (void *)dhd_alloc_ring_space(dhd, ring, count, &alloced);
	DHD_GENERAL_UNLOCK(dhd, flags);
	if (msg_start == NULL) {
		DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n",
			__FUNCTION__, __LINE__));
		return -1;
	}
	/* if msg_start != NULL, we should have alloced space for atleast 1 item */
	ASSERT(alloced > 0);

	rxbuf_post_tmp = (uint8*)msg_start;

	/* loop through
	 each message */
	for (i = 0; i < alloced; i++) {
		rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;

		/* Create a rx buffer */
		if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
			DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n",
				__FUNCTION__, __LINE__));
			break;
		}

		pktlen = PKTLEN(dhd->osh, p);
		physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
			DMA_RX, p, 0);
		if (PHYSADDRISZERO(physaddr)) {
			DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
			PKTFREE(dhd->osh, p, FALSE);
			DHD_ERROR(("Invalid phyaddr 0\n"));
			ASSERT(0);
			break;
		}

		/* leading rx_metadata_offset bytes of the buffer are reserved
		 * for metadata; data area starts after them
		 */
		PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
		pktlen = PKTLEN(dhd->osh, p);

		/* CMN msg header */
		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
		rxbuf_post->cmn_hdr.if_id = 0;

		/* get the lock before calling NATIVE_TO_PKTID */
		DHD_GENERAL_LOCK(dhd, flags);
		rxbuf_post->cmn_hdr.request_id =
			htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p,
				physaddr, pktlen, DMA_RX, BUFF_TYPE_DATA_RX));
		/* free lock */
		DHD_GENERAL_UNLOCK(dhd, flags);

		if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
			DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
			PKTFREE(dhd->osh, p, FALSE);
			DHD_ERROR(("Pktid pool depleted.\n"));
			break;
		}

		rxbuf_post->data_buf_len = htol16((uint16)pktlen);
		rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
		rxbuf_post->data_buf_addr.low_addr =
			htol32(PHYSADDRLO(physaddr) + prot->rx_metadata_offset);

		if (prot->rx_metadata_offset) {
			rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
			rxbuf_post->metadata_buf_addr.high_addr =
				htol32(PHYSADDRHI(physaddr));
			rxbuf_post->metadata_buf_addr.low_addr =
				htol32(PHYSADDRLO(physaddr));
		} else {
			rxbuf_post->metadata_buf_len = 0;
			rxbuf_post->metadata_buf_addr.high_addr = 0;
			rxbuf_post->metadata_buf_addr.low_addr = 0;
		}

		/* Move rxbuf_post_tmp to next item */
		rxbuf_post_tmp = rxbuf_post_tmp + RING_LEN_ITEMS(ring);
	}

	/* fewer buffers posted than ring slots claimed: wind the write
	 * pointer back by the unused slots (with ring wrap-around)
	 */
	if (i < alloced) {
		if (RING_WRITE_PTR(ring) < (alloced - i))
			RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - (alloced - i);
		else
			RING_WRITE_PTR(ring) -= (alloced - i);
		alloced = i;
	}

	/* Update the write pointer in TCM & ring bell */
	if (alloced > 0)
		prot_ring_write_complete(dhd, prot->h2dring_rxp_subn, msg_start,
			alloced);

	return alloced;
}

/* Post a single event (event_buf==TRUE) or ioctl-response buffer to the
 * H2D control submission ring. Returns 1 on success, -1 on failure.
 */
static int
dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
{
	void *p;
	uint16 pktsz;
	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
	dmaaddr_t physaddr;
	uint32 pktlen;
	dhd_prot_t *prot = dhd->prot;
	uint16 alloced = 0;
	unsigned long flags;
	uint8 buf_type;

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return -1;
	}

	if (event_buf) {
		/* Allocate packet for event buffer post */
		pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
	} else {
		/* Allocate packet for ctrl/ioctl buffer post */
		pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
	}

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_IOCTLBUF)
	if (!event_buf)
		p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
	else
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_EVENTBUF */
	p = PKTGET(dhd->osh, pktsz, FALSE);

	if (p == NULL) {
		DHD_ERROR(("%s:%d: PKTGET for %s rxbuf failed\n",
			__FUNCTION__, __LINE__, event_buf ? "event" : "ioctl"));
		return -1;
	}

	pktlen = PKTLEN(dhd->osh, p);
	physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
	if (PHYSADDRISZERO(physaddr)) {
		DHD_ERROR(("Invalid phyaddr 0\n"));
		ASSERT(0);
		goto free_pkt_return;
	}

	DHD_GENERAL_LOCK(dhd, flags);
	rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)dhd_alloc_ring_space(dhd,
		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
		&alloced);
	if (rxbuf_post == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer"
			" for %s\n", __FUNCTION__, __LINE__, event_buf ?
			"event" : "ioctl"));
		DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
		goto free_pkt_return;
	}

	/* CMN msg header */
	if (event_buf)
		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
	else
		rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
	rxbuf_post->cmn_hdr.if_id = 0;

	buf_type = ((event_buf == 1) ? BUFF_TYPE_EVENT_RX : BUFF_TYPE_IOCTL_RX);
	rxbuf_post->cmn_hdr.request_id =
		htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr,
			pktlen, DMA_RX, buf_type));

	if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
		/* pktid pool depleted: give the claimed ring slot back by
		 * winding the write pointer back one (with wrap-around)
		 */
		if (RING_WRITE_PTR(prot->h2dring_ctrl_subn) == 0)
			RING_WRITE_PTR(prot->h2dring_ctrl_subn) =
				RING_MAX_ITEM(prot->h2dring_ctrl_subn) - 1;
		else
			RING_WRITE_PTR(prot->h2dring_ctrl_subn)--;
		DHD_GENERAL_UNLOCK(dhd, flags);
		DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
		goto free_pkt_return;
	}

	rxbuf_post->cmn_hdr.flags = 0;
	rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
	rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
	rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr));

	/* Update the write pointer in TCM & ring bell */
	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, rxbuf_post,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
	DHD_GENERAL_UNLOCK(dhd, flags);

	return 1;

free_pkt_return:
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_IOCTLBUF)
	if (!event_buf)
		PKTFREE_STATIC(dhd->osh, p, FALSE);
	else
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_EVENTBUF */
	PKTFREE(dhd->osh, p, FALSE);

	return -1;
}

/* Post up to max_to_post event or ioctl-response buffers; returns how many
 * were actually posted (stops at the first failure).
 */
static uint16
dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
{
	uint32 i = 0;
	int32 ret_val;

	DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return 0;
	}

	while (i < max_to_post) {
		ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
		if (ret_val < 0)
			break;
		i++;
	}
	DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n",
		i, event_buf));
	return (uint16)i;
}

/* Replenish the ioctl-response buffer pool up to its configured maximum. */
static int
dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	uint16 retcnt = 0;

	DHD_INFO(("ioctl resp buf post\n"));
	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return 0;
	}
	retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, FALSE,
		prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted);
	prot->cur_ioctlresp_bufs_posted += retcnt;
	return retcnt;
}

/* Replenish the event buffer pool up to its configured maximum. */
static int
dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;
	uint16 retcnt = 0;

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return 0;
	}
	retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE,
		prot->max_eventbufpost - prot->cur_event_bufs_posted);
	prot->cur_event_bufs_posted += retcnt;
	return retcnt;
}

/* Drain the D2H RX completion ring, processing at most 'bound' items.
 * Returns TRUE when more work may remain (caller should reschedule).
 */
bool BCMFASTPATH
dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
{
	dhd_prot_t *prot = dhd->prot;
	bool more = TRUE;
	uint n = 0;

	/* Process all the messages - DTOH direction */
	while (TRUE) {
		uint8 *src_addr;
		uint16 src_len;

		/* Store current read pointer */
		/* Read pointer will be updated in prot_early_upd_rxcpln_read_idx */
		prot_store_rxcpln_read_idx(dhd, prot->d2hring_rx_cpln);
		/* Get the message from ring */
		src_addr = prot_get_src_addr(dhd, prot->d2hring_rx_cpln, &src_len);
		if (src_addr == NULL) {
			more = FALSE;
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(src_addr);

		if (dhd_prot_process_msgtype(dhd, prot->d2hring_rx_cpln, src_addr,
			src_len) != BCME_OK) {
			prot_upd_read_idx(dhd, prot->d2hring_rx_cpln);
			DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
				__FUNCTION__, src_len));
		}

		/* After batch processing, check RX bound */
		n += src_len/RING_LEN_ITEMS(prot->d2hring_rx_cpln);
		if (n >= bound) {
			break;
		}
	}

	return more;
}

/* Refresh a tx flowring's read index (DMA'ed by the dongle when enabled)
 * and kick the flowring scheduler.
 */
void
dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flow_id, void *msgring_info)
{
	uint16 r_index = 0;
	msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring_info;

	/* Update read pointer
	 */
	if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
		r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
		ring->ringstate->r_offset = r_index;
	}

	DHD_TRACE(("flow %d, write %d read %d \n\n", flow_id,
		RING_WRITE_PTR(ring), RING_READ_PTR(ring)));

	/* Need more logic here, but for now use it directly */
	dhd_bus_schedule_queue(dhd->bus, flow_id, TRUE);
}

/* Drain the D2H TX completion ring, processing at most 'bound' items.
 * Returns TRUE when more work may remain.
 */
bool BCMFASTPATH
dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
{
	dhd_prot_t *prot = dhd->prot;
	bool more = TRUE;
	uint n = 0;

	/* Process all the messages - DTOH direction */
	while (TRUE) {
		uint8 *src_addr;
		uint16 src_len;

		src_addr = prot_get_src_addr(dhd, prot->d2hring_tx_cpln, &src_len);
		if (src_addr == NULL) {
			more = FALSE;
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(src_addr);

		if (dhd_prot_process_msgtype(dhd, prot->d2hring_tx_cpln, src_addr,
			src_len) != BCME_OK) {
			DHD_ERROR(("%s: Error at process txcmpl msgbuf of len %d\n",
				__FUNCTION__, src_len));
		}

		/* Write to dngl rd ptr */
		prot_upd_read_idx(dhd, prot->d2hring_tx_cpln);

		/* After batch processing, check bound */
		n += src_len/RING_LEN_ITEMS(prot->d2hring_tx_cpln);
		if (n >= bound) {
			break;
		}
	}

	return more;
}

/* Drain the D2H control completion ring (no bound - runs to empty). */
int BCMFASTPATH
dhd_prot_process_ctrlbuf(dhd_pub_t * dhd)
{
	dhd_prot_t *prot = dhd->prot;

	/* Process all the messages - DTOH direction */
	while (TRUE) {
		uint8 *src_addr;
		uint16 src_len;

		src_addr = prot_get_src_addr(dhd, prot->d2hring_ctrl_cpln, &src_len);
		if (src_addr == NULL) {
			break;
		}

		/* Prefetch data to populate the cache */
		OSL_PREFETCH(src_addr);

		if (dhd_prot_process_msgtype(dhd, prot->d2hring_ctrl_cpln, src_addr,
			src_len) != BCME_OK) {
			DHD_ERROR(("%s: Error at process ctrlmsgbuf of len %d\n",
				__FUNCTION__, src_len));
		}

		/* Write to dngl rd ptr */
		prot_upd_read_idx(dhd, prot->d2hring_ctrl_cpln);
	}

	return 0;
}

/* Strip the optional per-message rx data offset header(s) from a batch of
 * D2H messages and hand each chunk to dhd_process_msgtype().
 */
static int BCMFASTPATH
dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf,
	uint16 len)
{
	dhd_prot_t *prot = dhd->prot;
	uint32 cur_dma_len = 0;
	int ret = BCME_OK;

	DHD_INFO(("%s: process msgbuf of len %d\n", __FUNCTION__, len));

	while (len > 0) {
		ASSERT(len > (sizeof(cmn_msg_hdr_t) + prot->rx_dataoffset));
		if (prot->rx_dataoffset) {
			/* first 4 bytes carry the dma'ed length of this chunk */
			cur_dma_len = *(uint32 *) buf;
			ASSERT(cur_dma_len <= len);
			buf += prot->rx_dataoffset;
			len -= (uint16)prot->rx_dataoffset;
		} else {
			cur_dma_len = len;
		}
		if (dhd_process_msgtype(dhd, ring, buf,
			(uint16)cur_dma_len) != BCME_OK) {
			DHD_ERROR(("%s: Error at process msg of dmalen %d\n",
				__FUNCTION__, cur_dma_len));
			ret = BCME_ERROR;
		}
		len -= (uint16)cur_dma_len;
		buf += cur_dma_len;
	}
	return ret;
}

/* Walk fixed-size messages in 'buf', dispatching each via table_lookup[]
 * keyed on the (possibly DMA-sync'ed) message type.
 */
static int BCMFASTPATH
dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
{
	uint16 pktlen = len;
	uint16 msglen;
	uint8 msgtype;
	cmn_msg_hdr_t *msg = NULL;
	int ret = BCME_OK;
#if defined(PCIE_D2H_SYNC_BZERO)
	uint8 *buf_head = buf;
#endif /* PCIE_D2H_SYNC_BZERO */

	ASSERT(ring && ring->ringmem);
	msglen = RING_LEN_ITEMS(ring);
	if (msglen == 0) {
		DHD_ERROR(("%s: ringidx %d, msglen is %d, pktlen is %d \n",
			__FUNCTION__, ring->idx, msglen, pktlen));
		return BCME_ERROR;
	}

	while (pktlen > 0) {
		msg = (cmn_msg_hdr_t *)buf;

#if defined(PCIE_D2H_SYNC)
		/* Wait until DMA completes, then fetch msgtype */
		msgtype = dhd->prot->d2h_sync_cb(dhd, ring, msg, msglen);
#else
		msgtype = msg->msg_type;
#endif /* !PCIE_D2H_SYNC */

		DHD_INFO(("msgtype %d, msglen is %d, pktlen is %d \n",
			msgtype, msglen, pktlen));
		if (msgtype == MSG_TYPE_LOOPBACK) {
			bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, msglen);
			DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", msglen));
		}

		if (msgtype >= DHD_PROT_FUNCS) {
			DHD_ERROR(("%s: msgtype %d, msglen is %d, pktlen is %d \n",
				__FUNCTION__, msgtype, msglen, pktlen));
			ret = BCME_ERROR;
			goto done;
		}

		if (table_lookup[msgtype]) {
			table_lookup[msgtype](dhd, buf, msglen);
		}

		if (pktlen < msglen) {
			ret = BCME_ERROR;
			goto done;
		}
		pktlen = pktlen - msglen;
		buf = buf + msglen;
		if (msgtype == MSG_TYPE_RX_CMPLT)
			prot_early_upd_rxcpln_read_idx(dhd,
				dhd->prot->d2hring_rx_cpln);
	}
done:
#if defined(PCIE_D2H_SYNC_BZERO)
	OSL_CACHE_FLUSH(buf_head, len - pktlen); /* Flush the bzeroed msg */
#endif /* PCIE_D2H_SYNC_BZERO */

#ifdef DHD_RX_CHAINING
	dhd_rxchain_commit(dhd);
#endif

	return ret;
}

/* No-op dispatch slot for message types that need no handling. */
static void
dhd_prot_noop(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
	return;
}

/* Log a D2H ring status report from the dongle. */
static void
dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
	pcie_ring_status_t * ring_status = (pcie_ring_status_t *)buf;
	DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, w_offset %d \n",
		ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
		ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
	/* How do we track this to pair it with ??? */
	return;
}

/* Log a D2H general status report from the dongle. */
static void
dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
	pcie_gen_status_t * gen_status = (pcie_gen_status_t *)buf;
	DHD_ERROR(("gen status: request_id %d, status 0x%04x, flow ring %d \n",
		gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
		gen_status->compl_hdr.flow_ring_id));
	/* How do we track this to pair it with ??? */
	return;
}

/* Handle the dongle's acknowledgement of an ioctl request submission. */
static void
dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
	ioctl_req_ack_msg_t * ioct_ack = (ioctl_req_ack_msg_t *)buf;

	DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
		ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
		ioct_ack->compl_hdr.flow_ring_id));
	if (ioct_ack->compl_hdr.status != 0) {
		DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
	}
#if defined(PCIE_D2H_SYNC_BZERO)
	memset(buf, 0, msglen); /* scrub so a stale phase bit is never re-read */
#endif /* PCIE_D2H_SYNC_BZERO */
}

/* Handle an ioctl completion: record status/length and wake the waiter. */
static void
dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
	uint16 status;
	uint32 resp_len = 0;
	uint32 pkt_id, xt_id;
	ioctl_comp_resp_msg_t * ioct_resp = (ioctl_comp_resp_msg_t *)buf;

	/* capture all fields before the (optional) scrub below */
	resp_len = ltoh16(ioct_resp->resp_len);
	xt_id = ltoh16(ioct_resp->trans_id);
	pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
	status = ioct_resp->compl_hdr.status;

#if defined(PCIE_D2H_SYNC_BZERO)
	memset(buf, 0, msglen);
#endif /* PCIE_D2H_SYNC_BZERO */

	DHD_CTL(("IOCTL_COMPLETE: pktid %x xtid %d status %x resplen %d\n",
		pkt_id, xt_id, status, resp_len));

	dhd_bus_update_retlen(dhd->bus, sizeof(ioctl_comp_resp_msg_t), pkt_id,
		status, resp_len);
	dhd_os_ioctl_resp_wake(dhd);
}

/* Handle a TX completion: reclaim the pktid, account, and free the pkt. */
static void BCMFASTPATH
dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
{
	dhd_prot_t *prot = dhd->prot;
	host_txbuf_cmpl_t * txstatus;
	unsigned long flags;
	uint32 pktid;
	void *pkt;

	/* locks required to protect circular buffer accesses */
	DHD_GENERAL_LOCK(dhd, flags);

	txstatus = (host_txbuf_cmpl_t *)buf;
	pktid = ltoh32(txstatus->cmn_hdr.request_id);

	DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
	if (prot->active_tx_count)
		prot->active_tx_count--;
	else
		DHD_ERROR(("Extra packets are freed\n"));

	ASSERT(pktid != 0);
	pkt = dhd_prot_packet_get(dhd, pktid, BUFF_TYPE_DATA_TX);
	if (pkt) {
#if defined(BCMPCIE)
		dhd_txcomplete(dhd, pkt, true);
#endif

#if DHD_DBG_SHOW_METADATA
		if (dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
			uchar *ptr;
			/* The
Ethernet header of TX frame was copied and removed. * Here, move the data pointer forward by Ethernet header size. */ PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); } #endif /* DHD_DBG_SHOW_METADATA */ PKTFREE(dhd->osh, pkt, TRUE); } #if defined(PCIE_D2H_SYNC_BZERO) memset(buf, 0, msglen); #endif /* PCIE_D2H_SYNC_BZERO */ DHD_GENERAL_UNLOCK(dhd, flags); return; } static void dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len) { wlevent_req_msg_t *evnt; uint32 bufid; uint16 buflen; int ifidx = 0; void* pkt; unsigned long flags; dhd_prot_t *prot = dhd->prot; int post_cnt = 0; bool zero_posted = FALSE; /* Event complete header */ evnt = (wlevent_req_msg_t *)buf; bufid = ltoh32(evnt->cmn_hdr.request_id); buflen = ltoh16(evnt->event_data_len); ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); /* Post another rxbuf to the device */ if (prot->cur_event_bufs_posted) prot->cur_event_bufs_posted--; else zero_posted = TRUE; post_cnt = dhd_msgbuf_rxbuf_post_event_bufs(dhd); if (zero_posted && (post_cnt <= 0)) { return; } #if defined(PCIE_D2H_SYNC_BZERO) memset(buf, 0, len); #endif /* PCIE_D2H_SYNC_BZERO */ /* locks required to protect pktid_map */ DHD_GENERAL_LOCK(dhd, flags); pkt = dhd_prot_packet_get(dhd, ltoh32(bufid), BUFF_TYPE_EVENT_RX); DHD_GENERAL_UNLOCK(dhd, flags); if (!pkt) { DHD_ERROR(("%s: pkt is NULL\n", __FUNCTION__)); return; } /* DMA RX offset updated through shared area */ if (dhd->prot->rx_dataoffset) PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); PKTSETLEN(dhd->osh, pkt, buflen); dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); } static void BCMFASTPATH dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen) { host_rxbuf_cmpl_t *rxcmplt_h; uint16 data_offset; /* offset at which data starts */ void * pkt; unsigned long flags; static uint8 current_phase = 0; uint ifidx; /* 
RXCMPLT HDR */ rxcmplt_h = (host_rxbuf_cmpl_t *)buf; /* Post another set of rxbufs to the device */ dhd_prot_return_rxbuf(dhd, 1); /* offset from which data starts is populated in rxstatus0 */ data_offset = ltoh16(rxcmplt_h->data_offset); DHD_GENERAL_LOCK(dhd, flags); pkt = dhd_prot_packet_get(dhd, ltoh32(rxcmplt_h->cmn_hdr.request_id), BUFF_TYPE_DATA_RX); DHD_GENERAL_UNLOCK(dhd, flags); if (!pkt) { return; } DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n", ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len), rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), ltoh16(rxcmplt_h->metadata_len))); #if DHD_DBG_SHOW_METADATA if (dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) { uchar *ptr; ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset); /* header followed by data */ bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len); dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len); } #endif /* DHD_DBG_SHOW_METADATA */ if (current_phase != rxcmplt_h->cmn_hdr.flags) { current_phase = rxcmplt_h->cmn_hdr.flags; } if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11) DHD_INFO(("D11 frame rxed \n")); /* data_offset from buf start */ if (data_offset) { /* data offset given from dongle after split rx */ PKTPULL(dhd->osh, pkt, data_offset); /* data offset */ } else { /* DMA RX offset updated through shared area */ if (dhd->prot->rx_dataoffset) PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); } /* Actual length of the packet */ PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len)); ifidx = rxcmplt_h->cmn_hdr.if_id; #if defined(PCIE_D2H_SYNC_BZERO) memset(buf, 0, msglen); #endif /* PCIE_D2H_SYNC_BZERO */ #ifdef DHD_RX_CHAINING /* Chain the packets */ dhd_rxchain_frame(dhd, pkt, ifidx); #else /* ! DHD_RX_CHAINING */ /* offset from which data starts is populated in rxstatus0 */ dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); #endif /* ! 
DHD_RX_CHAINING */ } /* Stop protocol: sync w/dongle state. */ void dhd_prot_stop(dhd_pub_t *dhd) { /* nothing to do for pcie */ } /* Add any protocol-specific data header. * Caller must reserve prot_hdrlen prepend space. */ void BCMFASTPATH dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) { return; } uint dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) { return 0; } #define PKTBUF pktbuf int BCMFASTPATH dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) { unsigned long flags; dhd_prot_t *prot = dhd->prot; host_txbuf_post_t *txdesc = NULL; dmaaddr_t physaddr, meta_physaddr; uint8 *pktdata; uint32 pktlen; uint32 pktid; uint8 prio; uint16 flowid = 0; uint16 alloced = 0; uint16 headroom; msgbuf_ring_t *msg_ring; if (!dhd->flow_ring_table) return BCME_NORESOURCE; if (!dhd_bus_is_txmode_push(dhd->bus)) { flow_ring_table_t *flow_ring_table; flow_ring_node_t *flow_ring_node; flowid = (uint16)DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(PKTBUF)); flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info; } else { msg_ring = prot->h2dring_txp_subn; } DHD_GENERAL_LOCK(dhd, flags); /* Create a unique 32-bit packet id */ pktid = NATIVE_TO_PKTID_RSV(dhd->prot->pktid_map_handle, PKTBUF); if (pktid == DHD_PKTID_INVALID) { DHD_ERROR(("Pktid pool depleted.\n")); /* * If we return error here, the caller would queue the packet * again. So we'll just free the skb allocated in DMA Zone. * Since we have not freed the original SKB yet the caller would * requeue the same. 
*/ goto err_no_res_pktfree; } /* Reserve space in the circular buffer */ txdesc = (host_txbuf_post_t *)dhd_alloc_ring_space(dhd, msg_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); if (txdesc == NULL) { DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", __FUNCTION__, __LINE__, prot->active_tx_count)); /* Free up the PKTID */ PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, physaddr, pktlen, BUFF_TYPE_NO_CHECK); goto err_no_res_pktfree; } /* test if dhcp pkt */ /* Extract the data pointer and length information */ pktdata = PKTDATA(dhd->osh, PKTBUF); pktlen = PKTLEN(dhd->osh, PKTBUF); /* Ethernet header: Copy before we cache flush packet using DMA_MAP */ bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); /* Extract the ethernet header and adjust the data pointer and length */ pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN); pktlen -= ETHER_HDR_LEN; /* Map the data pointer to a DMA-able address */ physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0); if ((PHYSADDRHI(physaddr) == 0) && (PHYSADDRLO(physaddr) == 0)) { DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); ASSERT(0); } /* No need to lock. 
Save the rest of the packet's metadata */ NATIVE_TO_PKTID_SAVE(dhd->prot->pktid_map_handle, PKTBUF, pktid, physaddr, pktlen, DMA_TX, BUFF_TYPE_DATA_TX); #ifdef TXP_FLUSH_NITEMS if (msg_ring->pend_items_count == 0) msg_ring->start_addr = (void *)txdesc; msg_ring->pend_items_count++; #endif /* Form the Tx descriptor message buffer */ /* Common message hdr */ txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST; txdesc->cmn_hdr.request_id = htol32(pktid); txdesc->cmn_hdr.if_id = ifidx; txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3; prio = (uint8)PKTPRIO(PKTBUF); txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT; txdesc->seg_cnt = 1; txdesc->data_len = htol16((uint16)pktlen); txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr)); txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr)); /* Move data pointer to keep ether header in local PKTBUF for later reference */ PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN); /* Handle Tx metadata */ headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF); if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) DHD_ERROR(("No headroom for Metadata tx %d %d\n", prot->tx_metadata_offset, headroom)); if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) { DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset)); /* Adjust the data pointer to account for meta data in DMA_MAP */ PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset); meta_physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), prot->tx_metadata_offset, DMA_RX, PKTBUF, 0); if (PHYSADDRISZERO(meta_physaddr)) { DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); ASSERT(0); } /* Adjust the data pointer back to original value */ PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset); txdesc->metadata_buf_len = prot->tx_metadata_offset; txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_physaddr)); txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_physaddr)); } else { txdesc->metadata_buf_len = htol16(0); 
txdesc->metadata_buf_addr.high_addr = 0; txdesc->metadata_buf_addr.low_addr = 0; } DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len, txdesc->cmn_hdr.request_id)); /* Update the write pointer in TCM & ring bell */ #ifdef TXP_FLUSH_NITEMS /* Flush if we have either hit the txp_threshold or if this msg is */ /* occupying the last slot in the flow_ring - before wrap around. */ if ((msg_ring->pend_items_count == prot->txp_threshold) || ((uint8 *) txdesc == (uint8 *) HOST_RING_END(msg_ring))) { dhd_prot_txdata_write_flush(dhd, flowid, TRUE); } #else prot_ring_write_complete(dhd, msg_ring, txdesc, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); #endif prot->active_tx_count++; DHD_GENERAL_UNLOCK(dhd, flags); return BCME_OK; err_no_res_pktfree: DHD_GENERAL_UNLOCK(dhd, flags); return BCME_NORESOURCE; } /* called with a lock */ void BCMFASTPATH dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock) { #ifdef TXP_FLUSH_NITEMS unsigned long flags = 0; flow_ring_table_t *flow_ring_table; flow_ring_node_t *flow_ring_node; msgbuf_ring_t *msg_ring; if (!dhd->flow_ring_table) return; if (!in_lock) { DHD_GENERAL_LOCK(dhd, flags); } flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info; /* Update the write pointer in TCM & ring bell */ if (msg_ring->pend_items_count) { prot_ring_write_complete(dhd, msg_ring, msg_ring->start_addr, msg_ring->pend_items_count); msg_ring->pend_items_count = 0; msg_ring->start_addr = NULL; } if (!in_lock) { DHD_GENERAL_UNLOCK(dhd, flags); } #endif /* TXP_FLUSH_NITEMS */ } #undef PKTBUF /* Only defined in the above routine */ int BCMFASTPATH dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len) { return 0; } static void BCMFASTPATH dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt) { dhd_prot_t *prot = dhd->prot; if (prot->rxbufpost >= rxcnt) { prot->rxbufpost -= rxcnt; } else { /* 
ASSERT(0); */
		prot->rxbufpost = 0;
	}

	/* Refill once the outstanding-post count falls below the threshold */
	if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
		dhd_msgbuf_rxbuf_post(dhd);

	return;
}

#if defined(CUSTOMER_HW4) && defined(CONFIG_CONTROL_PM)
extern bool g_pm_control;
#endif /* CUSTOMER_HW4 & CONFIG_CONTROL_PM */

/* Use protocol to issue ioctl to dongle.
 * Serializes ioctls via prot->ioctl_mutex and the prot->pending flag,
 * dispatches to the set/query paths and normalizes positive results to 0.
 * Returns 0 on success or a negative BCME_/errno-style code.
 */
int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
{
	dhd_prot_t *prot = dhd->prot;
	int ret = -1;
	uint8 action;

	if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
		goto done;
	}

	if (dhd->busstate == DHD_BUS_SUSPEND) {
		DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
		goto done;
	}

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

#ifdef CUSTOMER_HW4
	if (ioc->cmd == WLC_SET_PM) {
#ifdef CONFIG_CONTROL_PM
		/* Platform policy: optionally ignore PM changes from userspace */
		if (g_pm_control == TRUE) {
			DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
				__FUNCTION__, *(char *)buf));
			goto done;
		}
#endif /* CONFIG_CONTROL_PM */
		DHD_ERROR(("%s: SET PM to %d\n", __FUNCTION__, *(char *)buf));
	}
#endif /* CUSTOMER_HW4 */

	ASSERT(len <= WLC_IOCTL_MAXLEN);

	if (len > WLC_IOCTL_MAXLEN)
		goto done;

	/* Only one ioctl may be in flight at a time */
	mutex_lock(&prot->ioctl_mutex);
	if (prot->pending == TRUE) {
		DHD_ERROR(("packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
			ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
			(unsigned long)prot->lastcmd));
		if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
			DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
		}
		mutex_unlock(&prot->ioctl_mutex);
		goto done;
	}

	prot->pending = TRUE;
	prot->lastcmd = ioc->cmd;
	action = ioc->set;
	if (action & WL_IOCTL_ACTION_SET) {
		ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
	} else {
		ret = dhdmsgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
		if (ret > 0)
			ioc->used = ret;
	}

	/* Too many programs assume ioctl() returns 0 on success */
	if (ret >= 0) {
		ret = 0;
	} else {
#ifndef CUSTOMER_HW4
		DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
#endif /* CUSTOMER_HW4 */
		dhd->dongle_error = ret;
	}

	/* Intercept the wme_dp ioctl here */
	if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
		int slen, val = 0;

		slen = strlen("wme_dp") + 1;
		if (len >= (int)(slen + sizeof(int)))
			bcopy(((char *)buf + slen), &val, sizeof(int));
		dhd->wme_dp = (uint8) ltoh32(val);
	}

	prot->pending = FALSE;
	mutex_unlock(&prot->ioctl_mutex);

done:
	return ret;
}

/* Post a loopback request of up to len bytes on the H2D control ring,
 * filling the message body with a 0..255 repeating byte pattern.
 * Diagnostics-only path; always returns 0.
 */
int
dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
{
	unsigned long flags;
	dhd_prot_t *prot = dhd->prot;
	uint16 alloced = 0;

	ioct_reqst_hdr_t *ioct_rqst;

	uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
	uint16 msglen = len + hdrlen;

	if (msglen > MSGBUF_MAX_MSG_SIZE)
		msglen = MSGBUF_MAX_MSG_SIZE;

	msglen = align(msglen, DMA_ALIGN_LEN);

	DHD_GENERAL_LOCK(dhd, flags);
	ioct_rqst = (ioct_reqst_hdr_t *)dhd_alloc_ring_space(dhd,
		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);

	if (ioct_rqst == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		return 0;
	}

	{
		/* Fill the whole message with a recognizable pattern so the
		 * echoed copy can be eyeballed in the log below.
		 */
		uint8 *ptr;
		uint16 i;

		ptr = (uint8 *)ioct_rqst;
		for (i = 0; i < msglen; i++) {
			ptr[i] = i % 256;
		}
	}

	/* Common msg buf hdr */
	ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
	ioct_rqst->msg.if_id = 0;

	bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);

	/* Update the write pointer in TCM &
ring bell */ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); DHD_GENERAL_UNLOCK(dhd, flags); return 0; } void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma) { if (dma == NULL) return; if (dma->srcmem.va) { DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va, dma->len, dma->srcmem.pa, dma->srcmem.dmah); dma->srcmem.va = NULL; } if (dma->destmem.va) { DMA_FREE_CONSISTENT(dhd->osh, dma->destmem.va, dma->len + 8, dma->destmem.pa, dma->destmem.dmah); dma->destmem.va = NULL; } } int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, dhd_dmaxfer_t *dma) { uint i; if (!dma) return BCME_ERROR; /* First free up exisiting buffers */ dmaxfer_free_dmaaddr(dhd, dma); dma->srcmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len, DMA_ALIGN_LEN, &i, &dma->srcmem.pa, &dma->srcmem.dmah); if (dma->srcmem.va == NULL) { return BCME_NOMEM; } /* Populate source with a pattern */ for (i = 0; i < len; i++) { ((uint8*)dma->srcmem.va)[i] = i % 256; } OSL_CACHE_FLUSH(dma->srcmem.va, len); dma->destmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len + 8, DMA_ALIGN_LEN, &i, &dma->destmem.pa, &dma->destmem.dmah); if (dma->destmem.va == NULL) { DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va, dma->len, dma->srcmem.pa, dma->srcmem.dmah); dma->srcmem.va = NULL; return BCME_NOMEM; } /* Clear the destination buffer */ bzero(dma->destmem.va, len +8); OSL_CACHE_FLUSH(dma->destmem.va, len+8); dma->len = len; dma->srcdelay = srcdelay; dma->destdelay = destdelay; return BCME_OK; } static void dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void * buf, uint16 msglen) { dhd_prot_t *prot = dhd->prot; OSL_CACHE_INV(prot->dmaxfer.destmem.va, prot->dmaxfer.len); if (prot->dmaxfer.srcmem.va && prot->dmaxfer.destmem.va) { if (memcmp(prot->dmaxfer.srcmem.va, prot->dmaxfer.destmem.va, prot->dmaxfer.len)) { bcm_print_bytes("XFER SRC: ", prot->dmaxfer.srcmem.va, prot->dmaxfer.len); bcm_print_bytes("XFER DEST: ", prot->dmaxfer.destmem.va, 
prot->dmaxfer.len);
		} else {
			DHD_INFO(("DMA successful\n"));
		}
	}
	dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
	dhd->prot->dmaxfer_in_progress = FALSE;
}

/* Kick off a dongle-driven loopback DMA transfer of up to
 * DMA_XFER_LEN_LIMIT bytes with the given per-side delays. Rejects a new
 * request while one is in progress. Result is checked asynchronously by
 * dhdmsgbuf_dmaxfer_compare().
 */
int
dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
{
	unsigned long flags;
	int ret = BCME_OK;
	dhd_prot_t *prot = dhd->prot;
	pcie_dma_xfer_params_t *dmap;
	uint32 xferlen = len > DMA_XFER_LEN_LIMIT ? DMA_XFER_LEN_LIMIT : len;
	uint16 msglen = sizeof(pcie_dma_xfer_params_t);
	uint16 alloced = 0;

	if (prot->dmaxfer_in_progress) {
		DHD_ERROR(("DMA is in progress...\n"));
		return ret;
	}
	prot->dmaxfer_in_progress = TRUE;
	if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
		&prot->dmaxfer)) != BCME_OK) {
		prot->dmaxfer_in_progress = FALSE;
		return ret;
	}

	if (msglen > MSGBUF_MAX_MSG_SIZE)
		msglen = MSGBUF_MAX_MSG_SIZE;

	msglen = align(msglen, DMA_ALIGN_LEN);

	DHD_GENERAL_LOCK(dhd, flags);
	dmap = (pcie_dma_xfer_params_t *)dhd_alloc_ring_space(dhd,
		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);

	if (dmap == NULL) {
		dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
		prot->dmaxfer_in_progress = FALSE;
		DHD_GENERAL_UNLOCK(dhd, flags);
		return BCME_NOMEM;
	}

	/* Common msg buf hdr */
	dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
	dmap->cmn_hdr.request_id = 0x1234;

	dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
	dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
	dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.destmem.pa));
	dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.destmem.pa));
	dmap->xfer_len = htol32(prot->dmaxfer.len);
	dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
	dmap->destdelay = htol32(prot->dmaxfer.destdelay);

	/* Update the write pointer in TCM & ring bell */
	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, dmap,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
	DHD_GENERAL_UNLOCK(dhd, flags);

	DHD_ERROR(("DMA Started...\n"));

	return BCME_OK;
}

/* Issue a GET ioctl to the dongle. "bcmerror"/"bcmerrorstr" are answered
 * from the local cache without touching the dongle. Otherwise posts the
 * request and blocks in dhdmsgbuf_cmplt() for the response.
 */
static int
dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
	dhd_prot_t *prot = dhd->prot;

	int ret = 0;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	/* Respond "bcmerror" and "bcmerrorstr" with local cache */
	if (cmd == WLC_GET_VAR && buf) {
		if (!strcmp((char *)buf, "bcmerrorstr")) {
			strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
			goto done;
		} else if (!strcmp((char *)buf, "bcmerror")) {
			*(int *)buf = dhd->dongle_error;
			goto done;
		}
	}

	ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
	if (ret < 0) {
		DHD_ERROR(("%s : dhd_fillup_ioct_reqst_ptrbased error : %d\n",
			__FUNCTION__, ret));
		return ret;
	}

	DHD_INFO(("ACTION %d ifdix %d cmd %d len %d \n",
		action, ifidx, cmd, len));

	/* wait for interrupt and get first fragment */
	ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);

done:
	return ret;
}

/* Wait for an ioctl response from the dongle, copy up to len bytes of the
 * response payload into buf and free/recycle the response packet.
 * Returns the dongle completion status, or a negative value on bus-down /
 * rx failure / buffer-post exhaustion.
 */
static int
dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf)
{
	dhd_prot_t *prot = dhd->prot;
	ioctl_comp_resp_msg_t ioct_resp;
	void* pkt;
	int retlen;
	int msgbuf_len = 0;
	int post_cnt = 0;
	unsigned long flags;
	bool zero_posted = FALSE;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
		return -1;
	}

	/* Replenish the ioctl-response buffer pool before waiting */
	if (prot->cur_ioctlresp_bufs_posted)
		prot->cur_ioctlresp_bufs_posted--;
	else
		zero_posted = TRUE;

	post_cnt = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
	if (zero_posted && (post_cnt <= 0)) {
		return -1;
	}

	memset(&ioct_resp, 0, sizeof(ioctl_comp_resp_msg_t));

	/* NOTE(review): msgbuf_len is always 0 here; dhd_bus_rxctl presumably
	 * fills ioct_resp from state recorded by dhd_bus_update_retlen() rather
	 * than from this length -- confirm against the bus layer.
	 */
	retlen = dhd_bus_rxctl(dhd->bus, (uchar*)&ioct_resp, msgbuf_len);
	if (retlen <= 0) {
		DHD_ERROR(("IOCTL request failed with error code %d\n", retlen));
		return retlen;
	}
	DHD_INFO(("ioctl resp retlen %d status %d, resp_len %d, pktid %d\n",
		retlen, ioct_resp.compl_hdr.status, ioct_resp.resp_len,
		ioct_resp.cmn_hdr.request_id));
	if (ioct_resp.resp_len != 0) {
		DHD_GENERAL_LOCK(dhd, flags);
		pkt = dhd_prot_packet_get(dhd, ioct_resp.cmn_hdr.request_id, BUFF_TYPE_IOCTL_RX);
		DHD_GENERAL_UNLOCK(dhd, flags);

		DHD_INFO(("ioctl ret buf %p retlen %d status %x \n", pkt, retlen,
			ioct_resp.compl_hdr.status));
		/* get ret buf */
		if ((buf) && (pkt)) {
			/* bcopy(PKTDATA(dhd->osh, pkt), buf, ioct_resp.resp_len); */
			/* ioct_resp.resp_len could have been changed to make it > 8 bytes */
			bcopy(PKTDATA(dhd->osh, pkt), buf, len);
		}
		if (pkt) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_IOCTLBUF)
			PKTFREE_STATIC(dhd->osh, pkt, FALSE);
#else
			PKTFREE(dhd->osh, pkt, FALSE);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_IOCTLBUF */
		}
	} else {
		/* Zero-length response: just release the posted buffer */
		DHD_GENERAL_LOCK(dhd, flags);
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_IOCTLBUF)
		dhd_prot_static_packet_free(dhd, ioct_resp.cmn_hdr.request_id, BUFF_TYPE_IOCTL_RX);
#else
		dhd_prot_packet_free(dhd, ioct_resp.cmn_hdr.request_id, BUFF_TYPE_IOCTL_RX);
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_IOCTLBUF */
		DHD_GENERAL_UNLOCK(dhd, flags);
	}

	return (int)(ioct_resp.compl_hdr.status);
}

/* Issue a SET ioctl to the dongle and wait for its completion status. */
static int
dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
{
	dhd_prot_t *prot = dhd->prot;

	int ret = 0;

	DHD_TRACE(("%s: Enter \n", __FUNCTION__));
	DHD_TRACE(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));

	if (dhd->busstate == DHD_BUS_DOWN) {
		DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
		return -EIO;
	}

	/* don't talk to the dongle if fw is about to be reloaded */
	if (dhd->hang_was_sent) {
		DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
			__FUNCTION__));
		return -EIO;
	}

	/* Fill up msgbuf for ioctl req */
	ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
	if (ret < 0) {
		DHD_ERROR(("%s : dhd_fillup_ioct_reqst_ptrbased error : %d\n",
			__FUNCTION__, ret));
		return ret;
	}

	/* NOTE(review): "ACTIOn" typo is in the original log string; left as-is */
	DHD_INFO(("ACTIOn %d ifdix %d cmd %d len %d \n",
		action, ifidx, cmd, len));

	ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);

	return ret;
}

/* Handles a protocol control response asynchronously */
int dhd_prot_ctl_complete(dhd_pub_t *dhd)
{
	return 0;
}

/* Check for and handle local prot-specific iovar commands */
int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
	void *params, int plen, void *arg, int len, bool set)
{
	return BCME_UNSUPPORTED;
}

/* Add prot dump output to a buffer */
void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
{
#if defined(PCIE_D2H_SYNC)
	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
		bcm_bprintf(strbuf, "\nd2h_sync: SEQNUM:");
	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
		bcm_bprintf(strbuf, "\nd2h_sync: XORCSUM:");
	else
		bcm_bprintf(strbuf, "\nd2h_sync: NONE:");
	bcm_bprintf(strbuf, " d2h_sync_wait max<%lu> tot<%lu>\n",
		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
#endif /* PCIE_D2H_SYNC */
}

#ifdef DHD_DEBUG_PAGEALLOC
/* Debug-only: walk the unread portion of a ring (read ptr to write ptr) and
 * log a one-line summary of each message.
 */
static void
dump_ring(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	void* ret_addr = NULL;
	cmn_msg_hdr_t *msg = NULL;
	uint16 msglen;
	uint16 r_ptr, w_ptr, depth;
	uint16 read_cnt, tot;
	int i;
	ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
	host_rxbuf_post_t *rxbuf_data;

	dhd_bus_cmn_readshared(dhd->bus, &r_ptr, RING_READ_PTR, ring->idx);
	dhd_bus_cmn_readshared(dhd->bus, &w_ptr, RING_WRITE_PTR, ring->idx);
	read_cnt = READ_AVAIL_SPACE(w_ptr, r_ptr, RING_MAX_ITEM(ring));
	tot = read_cnt;
	msglen = RING_LEN_ITEMS(ring);
	depth = ring->ringmem->max_item;
	DHD_ERROR(("%s:%s r:%d w:%d depth:%d msglen:%d tot:%d\n",
		__FUNCTION__, ring->name, r_ptr, w_ptr, depth, msglen, tot));

	for (i = 0; i < tot; i++) {
		/* if space available, calculate address to be read */
		ret_addr = (char*)ring->ring_base.va + (r_ptr * ring->ringmem->len_items);

		/* Cache invalidate */
		OSL_CACHE_INV((void *) ret_addr, ring->ringmem->len_items);

		msg = (cmn_msg_hdr_t *)ret_addr;
		if ((msg->msg_type == MSG_TYPE_EVENT_BUF_POST) ||
			(msg->msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)) {
			/* NOTE(review): cast binds tighter than +, so this advances
			 * by sizeof(ioctl_resp_evt_buf_post_msg_t), not past the
			 * cmn header -- confirm whether (T *)(msg+1) was intended.
			 */
			rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)msg+1;
			DHD_ERROR(("%d,%d) msgtype:0x%x request_id:0x%x if_id:0x%x"
				" addr:0x%x:0x%x len:%d\n",
				i, r_ptr, msg->msg_type, msg->request_id, msg->if_id,
				rxbuf_post->host_buf_addr.high_addr,
				rxbuf_post->host_buf_addr.low_addr,
				rxbuf_post->host_buf_len));
		} else if (msg->msg_type == MSG_TYPE_RXBUF_POST) {
			/* NOTE(review): same precedence concern as above */
			rxbuf_data = (host_rxbuf_post_t *)msg+1;
			DHD_ERROR(("%d,%d) msgtype:0x%x request_id:0x%x if_id:0x%x"
				" addr:0x%x:%x len:%d meta:0x%x:%x len:%d\n",
				i, r_ptr, msg->msg_type, msg->request_id, msg->if_id,
				rxbuf_data->data_buf_addr.high_addr,
				rxbuf_data->data_buf_addr.low_addr,
				rxbuf_data->data_buf_len,
				rxbuf_data->metadata_buf_addr.high_addr,
				rxbuf_data->metadata_buf_addr.low_addr,
				rxbuf_data->metadata_buf_len));
		} else {
			DHD_ERROR(("%d,%d) msgtype:0x%x request_id:0x%x if_id:0x%x\n",
				i, r_ptr, msg->msg_type, msg->request_id, msg->if_id));
		}

		r_ptr = (r_ptr + 1) % depth;
	}
}

/* Debug-only: dump sync mode and the contents of every active ring.
 * Intended for use from a kernel-crash/debug context.
 */
void dhd_prot_dump_kernel_crash(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

#if defined(PCIE_D2H_SYNC)
	if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
		DHD_ERROR(("\nd2h_sync: SEQNUM:"));
	else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
		DHD_ERROR(("\nd2h_sync: XORCSUM:"));
	else
		DHD_ERROR(("\nd2h_sync: NONE:"));
	DHD_ERROR((" d2h_sync_wait max<%lu> tot<%lu>\n",
		dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot));
#endif /* PCIE_D2H_SYNC */

	/* Dump Ctrl */
	if (prot->h2dring_ctrl_subn)
		dump_ring(dhd, prot->h2dring_ctrl_subn);
	if (prot->d2hring_ctrl_cpln)
		dump_ring(dhd, prot->d2hring_ctrl_cpln);

	/* Dump TX */
	if (prot->h2dring_txp_subn)
		dump_ring(dhd, prot->h2dring_txp_subn);
	if (prot->d2hring_tx_cpln)
dump_ring(dhd, prot->d2hring_tx_cpln);

	/* Dump RX */
	if (prot->h2dring_rxp_subn)
		dump_ring(dhd, prot->h2dring_rxp_subn);
	if (prot->d2hring_rx_cpln)
		dump_ring(dhd, prot->d2hring_rx_cpln);
}
#endif /* DHD_DEBUG_PAGEALLOC */

/* Update local copy of dongle statistics */
void dhd_prot_dstats(dhd_pub_t *dhd)
{
	return;
}

/* Packet reordering is not handled on the pcie msgbuf path */
int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
	uint reorder_info_len, void **pkt, uint32 *free_buf_count)
{
	return 0;
}

/* post a dummy message to interrupt dongle */
/* used to process cons commands */
int
dhd_post_dummy_msg(dhd_pub_t *dhd)
{
	unsigned long flags;
	hostevent_hdr_t *hevent = NULL;
	uint16 alloced = 0;

	dhd_prot_t *prot = dhd->prot;

	DHD_GENERAL_LOCK(dhd, flags);
	hevent = (hostevent_hdr_t *)dhd_alloc_ring_space(dhd,
		prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);

	if (hevent == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		return -1;
	}

	/* CMN msg header */
	hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
	hevent->msg.if_id = 0;

	/* Event payload */
	hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);

	/* Since, we are filling the data directly into the bufptr obtained
	 * from the msgbuf, we can directly call the write_complete
	 */
	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, hevent,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
	DHD_GENERAL_UNLOCK(dhd, flags);

	return 0;
}

/* Reserve space for nitems messages in an H2D ring. On failure the cached
 * read pointer is refreshed from the dongle (DMA index area or TCM) and the
 * allocation is retried once. Returns a pointer into the ring, or NULL.
 * Caller must hold DHD_GENERAL_LOCK.
 */
static void * BCMFASTPATH
dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
{
	void * ret_buf;
	uint16 r_index = 0;

	/* Alloc space for nitems in the ring */
	ret_buf = prot_get_ring_space(ring, nitems, alloced);

	if (ret_buf == NULL) {
		/* if alloc failed , invalidate cached read ptr */
		if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
			r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
			ring->ringstate->r_offset = r_index;
		} else
			dhd_bus_cmn_readshared(dhd->bus, &(RING_READ_PTR(ring)),
				RING_READ_PTR, ring->idx);

		/* Try allocating once more */
		ret_buf = prot_get_ring_space(ring, nitems, alloced);

		if (ret_buf == NULL) {
			DHD_INFO(("RING space not available on ring %s for %d items \n",
				ring->name, nitems));
			DHD_INFO(("write %d read %d \n\n",
				RING_WRITE_PTR(ring), RING_READ_PTR(ring)));
			return NULL;
		}
	}

	/* Return alloced space */
	return ret_buf;
}

/* Fixed request_id used for pointer-based ioctl requests */
#define DHD_IOCTL_REQ_PKTID	0xFFFE

/* Non inline ioct request */
/* Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer */
/* Form a separate request buffer where a 4 byte cmn header is added in the front */
/* buf contents from parent function is copied to remaining section of this buffer */
static int
dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
{
	dhd_prot_t *prot = dhd->prot;
	ioctl_req_msg_t *ioct_rqst;
	void * ioct_buf;	/* For ioctl payload */
	uint16 rqstlen, resplen;
	unsigned long flags;
	uint16 alloced = 0;

	rqstlen = len;
	resplen = len;

	/* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
	/* 8K allocation of dongle buffer fails */
	/* dhd doesnt give separate input & output buf lens */
	/* so making the assumption that input length can never be more than 1.5k */
	rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);

	DHD_GENERAL_LOCK(dhd, flags);
	/* Request for cbuf space */
	ioct_rqst = (ioctl_req_msg_t*)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
	if (ioct_rqst == NULL) {
		DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
		DHD_GENERAL_UNLOCK(dhd, flags);
		return -1;
	}

	/* Common msg buf hdr */
	ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
	ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
	ioct_rqst->cmn_hdr.flags = 0;
	ioct_rqst->cmn_hdr.request_id = DHD_IOCTL_REQ_PKTID;

	ioct_rqst->cmd = htol32(cmd);
	ioct_rqst->output_buf_len = htol16(resplen);
	ioct_rqst->trans_id = prot->ioctl_trans_id ++;

	/* populate ioctl buffer info */
	ioct_rqst->input_buf_len = htol16(rqstlen);
	ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
	ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
	/* copy ioct payload */
	ioct_buf = (void *) prot->ioctbuf.va;

	if (buf)
		memcpy(ioct_buf, buf, len);

	OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);

	if ((ulong)ioct_buf % DMA_ALIGN_LEN)
		DHD_ERROR(("host ioct address unaligned !!!!! \n"));

	DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
		ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
		ioct_rqst->trans_id));

	/* upd wrt ptr and raise interrupt */
	prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
	DHD_GENERAL_UNLOCK(dhd, flags);

	return 0;
}

/* Packet to PacketID mapper */
/* One entry per outstanding packet; slot 0 is never handed out (id=0 is the
 * dongle's error sentinel), hence the count+1 sizing of pktid_list.
 */
typedef struct {
	ulong native;	/* host packet pointer stored as an integer */
	dmaaddr_t pa;	/* mapped DMA address of the packet data */
	uint32 pa_len;	/* mapped length */
	uchar dma;	/* DMA direction used for the mapping */
} pktid_t;

typedef struct {
	void *osh;		/* osl handle used for alloc/free */
	void *mwbmap_hdl;	/* hierarchical bitmap of free ids */
	pktid_t *pktid_list;	/* count+1 entries, indexed by id (1-based) */
	uint32 count;		/* max number of ids */
} pktid_map_t;

/* Allocate a pktid map capable of tracking count packets.
 * Returns an opaque handle or NULL on allocation failure.
 */
void *pktid_map_init(void *osh, uint32 count)
{
	pktid_map_t *handle;

	handle = (pktid_map_t *) MALLOC(osh, sizeof(pktid_map_t));
	if (handle == NULL) {
		printf("%s:%d: MALLOC failed for size %d\n",
			__FUNCTION__, __LINE__, (uint32) sizeof(pktid_map_t));
		return NULL;
	}
	handle->osh = osh;
	handle->count = count;
	handle->mwbmap_hdl = bcm_mwbmap_init(osh, count);
	if (handle->mwbmap_hdl == NULL) {
		printf("%s:%d: bcm_mwbmap_init failed for count %d\n",
			__FUNCTION__, __LINE__, count);
		MFREE(osh, handle, sizeof(pktid_map_t));
		return NULL;
	}

	/* +1 because id 0 is reserved; ids run 1..count */
	handle->pktid_list = (pktid_t *) MALLOC(osh, sizeof(pktid_t) * (count+1));
	if (handle->pktid_list == NULL) {
		printf("%s:%d: MALLOC failed for count %d / total = %d\n",
			__FUNCTION__, __LINE__, count, (uint32) sizeof(pktid_t) * count);
		bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
		MFREE(osh, handle, sizeof(pktid_map_t));
		return NULL;
	}

	return handle;
}

/* Tear down a pktid map: unmap and free every packet still outstanding,
 * then release the bitmap and the map itself.
 * NOTE(review): iterates MAX_PKTID_ITEMS rather than handle->count --
 * confirm the map is always created with count == MAX_PKTID_ITEMS.
 */
void pktid_map_uninit(void *pktid_map_handle)
{
	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
	uint32 ix;

	if (handle != NULL) {
		void *osh = handle->osh;
		for (ix = 0; ix < MAX_PKTID_ITEMS; ix++) {
			if (!bcm_mwbmap_isfree(handle->mwbmap_hdl, ix)) {
				/* Mark the slot as free */
				bcm_mwbmap_free(handle->mwbmap_hdl, ix);
				/*
				Here we can do dma unmapping for 32 bit also.
				Since this in removal path, it will not affect performance
				*/
				DMA_UNMAP(osh, handle->pktid_list[ix+1].pa,
					(uint) handle->pktid_list[ix+1].pa_len,
					handle->pktid_list[ix+1].dma, 0, 0);
				PKTFREE(osh,
					(unsigned long*)handle->pktid_list[ix+1].native, TRUE);
			}
		}
		bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
		MFREE(osh, handle->pktid_list, sizeof(pktid_t) * (handle->count+1));
		MFREE(osh, handle, sizeof(pktid_map_t));
	}
	return;
}

/* Allocate a unique nonzero packet id and record the packet's pointer and
 * DMA mapping under it. Returns the id, or 0 on failure (0 doubles as the
 * dongle-side error sentinel).
 */
uint32 BCMFASTPATH
pktid_map_unique(void *pktid_map_handle, void *pkt, dmaaddr_t physaddr, uint32 physlen, uint32 dma)
{
	uint32 id;
	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;

	if (handle == NULL) {
		printf("%s:%d: Error !!! pktid_map_unique called without initing pktid_map\n",
			__FUNCTION__, __LINE__);
		return 0;
	}
	id = bcm_mwbmap_alloc(handle->mwbmap_hdl);
	if (id == BCM_MWBMAP_INVALID_IDX) {
		printf("%s:%d: bcm_mwbmap_alloc failed. Free Count = %d\n",
			__FUNCTION__, __LINE__, bcm_mwbmap_free_cnt(handle->mwbmap_hdl));
		return 0;
	}

	/* id=0 is invalid as we use this for error checking in the dongle */
	id += 1;
	handle->pktid_list[id].native = (ulong) pkt;
	handle->pktid_list[id].pa = physaddr;
	handle->pktid_list[id].pa_len = (uint32) physlen;
	handle->pktid_list[id].dma = (uchar)dma;

	return id;
}

/* Look up and release a packet by id: returns the native packet pointer and
 * reports its DMA mapping via physaddr/physlen, freeing the id for reuse.
 * Returns NULL if the map is uninitialized or the slot was already free.
 */
void * BCMFASTPATH
pktid_get_packet(void *pktid_map_handle, uint32 id, dmaaddr_t *physaddr, uint32 *physlen)
{
	void *native = NULL;
	pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
	if (handle == NULL) {
		printf("%s:%d: Error !!! pktid_get_packet called without initing pktid_map\n",
			__FUNCTION__, __LINE__);
		return NULL;
	}

	/* Debug check */
	if (bcm_mwbmap_isfree(handle->mwbmap_hdl, (id-1))) {
		printf("%s:%d: Error !!!. slot (%d/0x%04x) free but the app is using it.\n",
			__FUNCTION__, __LINE__, (id-1), (id-1));
		return NULL;
	}

	native = (void *) handle->pktid_list[id].native;
	*physaddr = handle->pktid_list[id].pa;
	*physlen = (uint32) handle->pktid_list[id].pa_len;

	/* Mark the slot as free */
	bcm_mwbmap_free(handle->mwbmap_hdl, (id-1));

	return native;
}

/* Allocate one msgbuf ring: the control structure, its shared ring_mem
 * descriptor and the DMA-coherent ring memory (max_item * len_item bytes).
 * Returns the ring or NULL, with all partial allocations rolled back.
 */
static msgbuf_ring_t*
prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item, uint16 len_item, uint16 ringid)
{
	uint alloced = 0;
	msgbuf_ring_t *ring;
	dmaaddr_t physaddr;
	uint16 size;

	ASSERT(name);
	BCM_REFERENCE(physaddr);

	/* allocate ring info */
	ring = MALLOC(prot->osh, sizeof(msgbuf_ring_t));
	if (ring == NULL) {
		ASSERT(0);
		return NULL;
	}
	bzero(ring, sizeof(*ring));

	/* Init name */
	strncpy(ring->name, name, sizeof(ring->name) - 1);

	/* Ringid in the order given in bcmpcie.h */
	ring->idx = ringid;

	/* init ringmem */
	ring->ringmem = MALLOC(prot->osh, sizeof(ring_mem_t));
	if (ring->ringmem == NULL)
		goto fail;
	bzero(ring->ringmem, sizeof(*ring->ringmem));

	ring->ringmem->max_item = max_item;
	ring->ringmem->len_items = len_item;
	size = max_item * len_item;

	/* Ring Memmory allocation */
	ring->ring_base.va = DMA_ALLOC_CONSISTENT(prot->osh, size, DMA_ALIGN_LEN,
		&alloced, &ring->ring_base.pa, &ring->ring_base.dmah);

	if (ring->ring_base.va == NULL)
		goto fail;
	ring->ringmem->base_addr.high_addr = htol32(PHYSADDRHI(ring->ring_base.pa));
	ring->ringmem->base_addr.low_addr = htol32(PHYSADDRLO(ring->ring_base.pa));

	ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);

	bzero(ring->ring_base.va, size);
	OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);

	/* Ring state init */
	ring->ringstate = MALLOC(prot->osh, sizeof(ring_state_t));
	if (ring->ringstate == NULL)
		goto fail;
	bzero(ring->ringstate, sizeof(*ring->ringstate));

	DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
		"ring start %p buf phys addr %x:%x \n",
		ring->name, ring->ringmem->max_item, ring->ringmem->len_items,
		size, ring->ring_base.va, ring->ringmem->base_addr.high_addr,
		ring->ringmem->base_addr.low_addr));

	return ring;
fail:
	/* Roll back whichever allocations succeeded */
	if (ring->ring_base.va) {
		PHYSADDRHISET(physaddr, ring->ringmem->base_addr.high_addr);
		PHYSADDRLOSET(physaddr, ring->ringmem->base_addr.low_addr);
		size = ring->ringmem->max_item * ring->ringmem->len_items;
		DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa, NULL);
		ring->ring_base.va = NULL;
	}
	if (ring->ringmem)
		MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
	MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
	ASSERT(0);
	return NULL;
}

/* Publish a ring's base address, depth and item length to the dongle via
 * the shared area, then mark the ring initialized.
 */
static void dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	/* update buffer address of ring */
	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->base_addr,
		sizeof(ring->ringmem->base_addr), RING_BUF_ADDR, ring->idx);

	/* Update max items possible in ring */
	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->max_item,
		sizeof(ring->ringmem->max_item), RING_MAX_ITEM, ring->idx);

	/* Update length of each item in the ring */
	dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->len_items,
		sizeof(ring->ringmem->len_items), RING_LEN_ITEMS, ring->idx);

	/* ring inited */
	ring->inited = TRUE;
}

/* Free everything prot_ring_attach() allocated for a ring: the DMA ring
 * memory, the ring_mem descriptor and the ring state. (Continues beyond
 * this chunk.)
 */
static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring)
{
	dmaaddr_t phyaddr;
	uint16 size;
	dhd_prot_t *prot = dhd->prot;
	BCM_REFERENCE(phyaddr);

	if (ring == NULL)
		return;

	if (ring->ringmem == NULL) {
		DHD_ERROR(("%s: ring->ringmem is NULL\n", __FUNCTION__));
		return;
	}

	ring->inited = FALSE;

	PHYSADDRHISET(phyaddr, ring->ringmem->base_addr.high_addr);
	PHYSADDRLOSET(phyaddr, ring->ringmem->base_addr.low_addr);
	size = ring->ringmem->max_item * ring->ringmem->len_items;
	/* Free up ring */
	if (ring->ring_base.va) {
		DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size,
			ring->ring_base.pa, ring->ring_base.dmah);
		ring->ring_base.va = NULL;
	}

	/* Free up ring mem space */
	if (ring->ringmem) {
		MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
		ring->ringmem = NULL;
	}

	/* Free up ring state info */
	if (ring->ringstate) {
MFREE(prot->osh, ring->ringstate, sizeof(ring_state_t)); ring->ringstate = NULL; } /* free up ring info */ MFREE(prot->osh, ring, sizeof(msgbuf_ring_t)); } /* Assumes only one index is updated ata time */ static void *BCMFASTPATH prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced) { void *ret_ptr = NULL; uint16 ring_avail_cnt; ASSERT(nitems <= RING_MAX_ITEM(ring)); ring_avail_cnt = CHECK_WRITE_SPACE(RING_READ_PTR(ring), RING_WRITE_PTR(ring), RING_MAX_ITEM(ring)); if (ring_avail_cnt == 0) { return NULL; } *alloced = MIN(nitems, ring_avail_cnt); /* Return next available space */ ret_ptr = (char*)HOST_RING_BASE(ring) + (RING_WRITE_PTR(ring) * RING_LEN_ITEMS(ring)); /* Update write pointer */ if ((RING_WRITE_PTR(ring) + *alloced) == RING_MAX_ITEM(ring)) RING_WRITE_PTR(ring) = 0; else if ((RING_WRITE_PTR(ring) + *alloced) < RING_MAX_ITEM(ring)) RING_WRITE_PTR(ring) += *alloced; else { /* Should never hit this */ ASSERT(0); return NULL; } return ret_ptr; } static void BCMFASTPATH prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 nitems) { dhd_prot_t *prot = dhd->prot; /* cache flush */ OSL_CACHE_FLUSH(p, RING_LEN_ITEMS(ring) * nitems); /* update write pointer */ /* If dma'ing h2d indices are supported * update the values in the host memory * o/w update the values in TCM */ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX, ring->idx, (uint16)RING_WRITE_PTR(ring)); else dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(ring)), sizeof(uint16), RING_WRITE_PTR, ring->idx); /* raise h2d interrupt */ prot->mb_ring_fn(dhd->bus, RING_WRITE_PTR(ring)); } /* If dma'ing h2d indices are supported * this function updates the indices in * the host memory */ static void dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index) { dhd_prot_t *prot = dhd->prot; uint32 *ptr = NULL; uint16 offset = 0; switch (type) { case H2D_DMA_WRITEINDX: ptr = (uint32 
*)(prot->h2d_dma_writeindx_buf.va); /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS * but in host memory their indices start * after H2D Common Rings */ if (ringid >= BCMPCIE_COMMON_MSGRINGS) offset = ringid - BCMPCIE_COMMON_MSGRINGS + BCMPCIE_H2D_COMMON_MSGRINGS; else offset = ringid; ptr += offset; *ptr = htol16(new_index); /* cache flush */ OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len); break; case D2H_DMA_READINDX: ptr = (uint32 *)(prot->d2h_dma_readindx_buf.va); /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */ offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS; ptr += offset; *ptr = htol16(new_index); /* cache flush */ OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len); break; default: DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", __FUNCTION__)); break; } DHD_TRACE(("%s: Data 0x%p, ringId %d, new_index %d\n", __FUNCTION__, ptr, ringid, new_index)); } static uint16 dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid) { uint32 *ptr = NULL; uint16 data = 0; uint16 offset = 0; switch (type) { case H2D_DMA_WRITEINDX: OSL_CACHE_INV((void *)dhd->prot->h2d_dma_writeindx_buf.va, dhd->prot->h2d_dma_writeindx_buf_len); ptr = (uint32 *)(dhd->prot->h2d_dma_writeindx_buf.va); /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS * but in host memory their indices start * after H2D Common Rings */ if (ringid >= BCMPCIE_COMMON_MSGRINGS) offset = ringid - BCMPCIE_COMMON_MSGRINGS + BCMPCIE_H2D_COMMON_MSGRINGS; else offset = ringid; ptr += offset; data = LTOH16((uint16)*ptr); break; case H2D_DMA_READINDX: OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va, dhd->prot->h2d_dma_readindx_buf_len); ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va); /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS * but in host memory their indices start * after H2D Common Rings */ if (ringid >= BCMPCIE_COMMON_MSGRINGS) offset = ringid - BCMPCIE_COMMON_MSGRINGS + 
BCMPCIE_H2D_COMMON_MSGRINGS; else offset = ringid; ptr += offset; data = LTOH16((uint16)*ptr); break; case D2H_DMA_WRITEINDX: OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va, dhd->prot->d2h_dma_writeindx_buf_len); ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va); /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */ offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS; ptr += offset; data = LTOH16((uint16)*ptr); break; case D2H_DMA_READINDX: OSL_CACHE_INV((void *)dhd->prot->d2h_dma_readindx_buf.va, dhd->prot->d2h_dma_readindx_buf_len); ptr = (uint32 *)(dhd->prot->d2h_dma_readindx_buf.va); /* H2D Common Righs start at Id BCMPCIE_H2D_COMMON_MSGRINGS */ offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS; ptr += offset; data = LTOH16((uint16)*ptr); break; default: DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", __FUNCTION__)); break; } DHD_TRACE(("%s: Data 0x%p, data %d\n", __FUNCTION__, ptr, data)); return (data); } /* D2H dircetion: get next space to read from */ static uint8* prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t * ring, uint16* available_len) { uint16 w_ptr; uint16 r_ptr; uint16 depth; void* ret_addr = NULL; uint16 d2h_w_index = 0; DHD_TRACE(("%s: h2d_dma_readindx_buf %p, d2h_dma_writeindx_buf %p\n", __FUNCTION__, (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va), (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va))); /* update write pointer */ if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) { /* DMAing write/read indices supported */ d2h_w_index = dhd_get_dmaed_index(dhd, D2H_DMA_WRITEINDX, ring->idx); ring->ringstate->w_offset = d2h_w_index; } else dhd_bus_cmn_readshared(dhd->bus, &(RING_WRITE_PTR(ring)), RING_WRITE_PTR, ring->idx); w_ptr = ring->ringstate->w_offset; r_ptr = ring->ringstate->r_offset; depth = ring->ringmem->max_item; /* check for avail space */ *available_len = READ_AVAIL_SPACE(w_ptr, r_ptr, depth); if (*available_len == 0) return NULL; if (*available_len > ring->ringmem->max_item) { DHD_ERROR(("%s: *available_len 
%d, ring->ringmem->max_item %d\n", __FUNCTION__, *available_len, ring->ringmem->max_item)); return NULL; } /* if space available, calculate address to be read */ ret_addr = (char*)ring->ring_base.va + (r_ptr * ring->ringmem->len_items); /* update read pointer */ if ((ring->ringstate->r_offset + *available_len) >= ring->ringmem->max_item) ring->ringstate->r_offset = 0; else ring->ringstate->r_offset += *available_len; ASSERT(ring->ringstate->r_offset < ring->ringmem->max_item); /* convert index to bytes */ *available_len = *available_len * ring->ringmem->len_items; /* Cache invalidate */ OSL_CACHE_INV((void *) ret_addr, *available_len); /* return read address */ return ret_addr; } static void prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) { /* update read index */ /* If dma'ing h2d indices supported * update the r -indices in the * host memory o/w in TCM */ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) dhd_set_dmaed_index(dhd, D2H_DMA_READINDX, ring->idx, (uint16)RING_READ_PTR(ring)); else dhd_bus_cmn_writeshared(dhd->bus, &(RING_READ_PTR(ring)), sizeof(uint16), RING_READ_PTR, ring->idx); } static void prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) { dhd_prot_t *prot; if (!dhd || !dhd->prot) return; prot = dhd->prot; prot->rx_cpln_early_upd_idx = RING_READ_PTR(ring); } static void prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) { dhd_prot_t *prot; if (!dhd || !dhd->prot) return; prot = dhd->prot; if (prot->rx_cpln_early_upd_idx == RING_READ_PTR(ring)) return; if (++prot->rx_cpln_early_upd_idx >= RING_MAX_ITEM(ring)) prot->rx_cpln_early_upd_idx = 0; if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) dhd_set_dmaed_index(dhd, D2H_DMA_READINDX, ring->idx, (uint16)prot->rx_cpln_early_upd_idx); else dhd_bus_cmn_writeshared(dhd->bus, &(prot->rx_cpln_early_upd_idx), sizeof(uint16), RING_READ_PTR, ring->idx); } int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) { tx_flowring_create_request_t 
*flow_create_rqst; msgbuf_ring_t *msgbuf_flow_info; dhd_prot_t *prot = dhd->prot; uint16 hdrlen = sizeof(tx_flowring_create_request_t); uint16 msglen = hdrlen; unsigned long flags; uint16 alloced = 0; if (!(msgbuf_flow_info = prot_ring_attach(prot, "h2dflr", H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE, BCMPCIE_H2D_TXFLOWRINGID + (flow_ring_node->flowid - BCMPCIE_H2D_COMMON_MSGRINGS)))) { DHD_ERROR(("%s: kmalloc for H2D TX Flow ring failed\n", __FUNCTION__)); return BCME_NOMEM; } /* Clear write pointer of the ring */ flow_ring_node->prot_info = (void *)msgbuf_flow_info; /* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */ msglen = align(msglen, DMA_ALIGN_LEN); DHD_GENERAL_LOCK(dhd, flags); /* Request for ring buffer space */ flow_create_rqst = (tx_flowring_create_request_t *)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); if (flow_create_rqst == NULL) { DHD_ERROR(("%s: No space in control ring for Flow create req\n", __FUNCTION__)); DHD_GENERAL_UNLOCK(dhd, flags); return BCME_NOMEM; } msgbuf_flow_info->inited = TRUE; /* Common msg buf hdr */ flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; flow_create_rqst->msg.request_id = htol16(0); /* TBD */ /* Update flow create message */ flow_create_rqst->tid = flow_ring_node->flow_info.tid; flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa)); memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da)); flow_create_rqst->flow_ring_ptr.low_addr = msgbuf_flow_info->ringmem->base_addr.low_addr; flow_create_rqst->flow_ring_ptr.high_addr = msgbuf_flow_info->ringmem->base_addr.high_addr; flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM); flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); DHD_ERROR(("%s Send 
Flow create Req msglen flow ID %d for peer " MACDBG " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, flow_ring_node->flow_info.ifindex)); /* upd wrt ptr and raise interrupt */ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_create_rqst, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); /* If dma'ing indices supported * update the w-index in host memory o/w in TCM */ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX, msgbuf_flow_info->idx, (uint16)RING_WRITE_PTR(msgbuf_flow_info)); else dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(msgbuf_flow_info)), sizeof(uint16), RING_WRITE_PTR, msgbuf_flow_info->idx); DHD_GENERAL_UNLOCK(dhd, flags); return BCME_OK; } static void dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen) { tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)buf; DHD_ERROR(("%s Flow create Response status = %d Flow %d\n", __FUNCTION__, flow_create_resp->cmplt.status, flow_create_resp->cmplt.flow_ring_id)); dhd_bus_flow_ring_create_response(dhd->bus, flow_create_resp->cmplt.flow_ring_id, flow_create_resp->cmplt.status); } void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) { msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; dhd_prot_ring_detach(dhd, flow_ring); DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__)); } void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, struct bcmstrbuf *strbuf) { msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; uint16 rd, wrt; dhd_bus_cmn_readshared(dhd->bus, &rd, RING_READ_PTR, flow_ring->idx); dhd_bus_cmn_readshared(dhd->bus, &wrt, RING_WRITE_PTR, flow_ring->idx); bcm_bprintf(strbuf, "RD %d WR %d\n", rd, wrt); } void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) { bcm_bprintf(strbuf, "CtrlPost: "); dhd_prot_print_flow_ring(dhd, 
dhd->prot->h2dring_ctrl_subn, strbuf); bcm_bprintf(strbuf, "CtrlCpl: "); dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_ctrl_cpln, strbuf); bcm_bprintf(strbuf, "RxPost: "); bcm_bprintf(strbuf, "RBP %d ", dhd->prot->rxbufpost); dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_rxp_subn, strbuf); bcm_bprintf(strbuf, "RxCpl: "); dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_rx_cpln, strbuf); if (dhd_bus_is_txmode_push(dhd->bus)) { bcm_bprintf(strbuf, "TxPost: "); dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_txp_subn, strbuf); } bcm_bprintf(strbuf, "TxCpl: "); dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_tx_cpln, strbuf); bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n", dhd->prot->active_tx_count, dhd_pktid_map_avail_cnt(dhd->prot->pktid_map_handle)); } int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) { tx_flowring_delete_request_t *flow_delete_rqst; dhd_prot_t *prot = dhd->prot; uint16 msglen = sizeof(tx_flowring_delete_request_t); unsigned long flags; uint16 alloced = 0; /* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */ msglen = align(msglen, DMA_ALIGN_LEN); /* Request for ring buffer space */ DHD_GENERAL_LOCK(dhd, flags); flow_delete_rqst = (tx_flowring_delete_request_t *)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); if (flow_delete_rqst == NULL) { DHD_GENERAL_UNLOCK(dhd, flags); DHD_ERROR(("%s Flow Delete req failure no ring mem %d \n", __FUNCTION__, msglen)); return BCME_NOMEM; } /* Common msg buf hdr */ flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; flow_delete_rqst->msg.request_id = htol16(0); /* TBD */ /* Update Delete info */ flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); flow_delete_rqst->reason = htol16(BCME_OK); DHD_ERROR(("%s sending FLOW RING ID %d for peer " MACDBG " prio %d ifindex %d" " Delete req msglen 
%d\n", __FUNCTION__, flow_ring_node->flowid, MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, flow_ring_node->flow_info.ifindex, msglen)); /* upd wrt ptr and raise interrupt */ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_delete_rqst, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); DHD_GENERAL_UNLOCK(dhd, flags); return BCME_OK; } static void dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen) { tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)buf; DHD_INFO(("%s Flow Delete Response status = %d \n", __FUNCTION__, flow_delete_resp->cmplt.status)); dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id, flow_delete_resp->cmplt.status); } int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) { tx_flowring_flush_request_t *flow_flush_rqst; dhd_prot_t *prot = dhd->prot; uint16 msglen = sizeof(tx_flowring_flush_request_t); unsigned long flags; uint16 alloced = 0; /* align it to 4 bytes, so that all start addr form cbuf is 4 byte aligned */ msglen = align(msglen, DMA_ALIGN_LEN); /* Request for ring buffer space */ DHD_GENERAL_LOCK(dhd, flags); flow_flush_rqst = (tx_flowring_flush_request_t *)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced); if (flow_flush_rqst == NULL) { DHD_GENERAL_UNLOCK(dhd, flags); DHD_ERROR(("%s Flow Flush req failure no ring mem %d \n", __FUNCTION__, msglen)); return BCME_NOMEM; } /* Common msg buf hdr */ flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH; flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; flow_flush_rqst->msg.request_id = htol16(0); /* TBD */ flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); flow_flush_rqst->reason = htol16(BCME_OK); DHD_INFO(("%s sending FLOW RING Flush req msglen %d \n", __FUNCTION__, msglen)); /* upd wrt ptr and raise interrupt */ prot_ring_write_complete(dhd, 
prot->h2dring_ctrl_subn, flow_flush_rqst, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); DHD_GENERAL_UNLOCK(dhd, flags); return BCME_OK; } static void dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen) { tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)buf; DHD_INFO(("%s Flow Flush Response status = %d \n", __FUNCTION__, flow_flush_resp->cmplt.status)); dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id, flow_flush_resp->cmplt.status); } int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) { uint32 *ptr; uint32 value; uint32 i; uint8 txpush = 0; uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus, &txpush); OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va, dhd->prot->d2h_dma_writeindx_buf_len); ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va); bcm_bprintf(b, "\n max_tx_queues %d, txpush mode %d\n", max_h2d_queues, txpush); bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr); value = ltoh32(*ptr); bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value); ptr++; value = ltoh32(*ptr); bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value); if (txpush) { ptr++; value = ltoh32(*ptr); bcm_bprintf(b, "\tH2D TXPOST value 0x%04x\n", value); } else { ptr++; bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr); for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) { value = ltoh32(*ptr); bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value); ptr++; } } OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va, dhd->prot->h2d_dma_readindx_buf_len); ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va); bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr); value = ltoh32(*ptr); bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value); ptr++; value = ltoh32(*ptr); bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value); ptr++; value = ltoh32(*ptr); bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value); return 0; } uint32 
dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx) { dhd_prot_t *prot = dhd->prot; if (rx) prot->rx_metadata_offset = (uint16)val; else prot->tx_metadata_offset = (uint16)val; return dhd_prot_metadatalen_get(dhd, rx); } uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx) { dhd_prot_t *prot = dhd->prot; if (rx) return prot->rx_metadata_offset; else return prot->tx_metadata_offset; } uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val) { dhd_prot_t *prot = dhd->prot; if (set) prot->txp_threshold = (uint16)val; val = prot->txp_threshold; return val; } #ifdef DHD_RX_CHAINING static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain) { rxchain->pkt_count = 0; } static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx) { uint8 *eh; uint8 prio; dhd_prot_t *prot = dhd->prot; rxchain_info_t *rxchain = &prot->rxchain; eh = PKTDATA(dhd->osh, pkt); prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT; /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */ /* so that the chain can be handed off to CTF bridge as is. 
*/ if (rxchain->pkt_count == 0) { /* First packet in chain */ rxchain->pkthead = rxchain->pkttail = pkt; /* Keep a copy of ptr to ether_da, ether_sa and prio */ rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; rxchain->h_prio = prio; rxchain->ifidx = ifidx; rxchain->pkt_count++; } else { if (PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, rxchain->h_da, rxchain->h_prio)) { /* Same flow - keep chaining */ PKTSETCLINK(rxchain->pkttail, pkt); rxchain->pkttail = pkt; rxchain->pkt_count++; } else { /* Different flow - First release the existing chain */ dhd_rxchain_commit(dhd); /* Create a new chain */ rxchain->pkthead = rxchain->pkttail = pkt; /* Keep a copy of ptr to ether_da, ether_sa and prio */ rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; rxchain->h_prio = prio; rxchain->ifidx = ifidx; rxchain->pkt_count++; } } if ((!ETHER_ISMULTI(rxchain->h_da)) && ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { PKTSETCHAINED(dhd->osh, pkt); PKTCINCRCNT(rxchain->pkthead); PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt)); } else { dhd_rxchain_commit(dhd); return; } /* If we have hit the max chain length, dispatch the chain and reset */ if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) { dhd_rxchain_commit(dhd); } } static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd) { dhd_prot_t *prot = dhd->prot; rxchain_info_t *rxchain = &prot->rxchain; if (rxchain->pkt_count == 0) return; /* Release the packets to dhd_linux */ dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count); /* Reset the chain */ dhd_rxchain_reset(rxchain); } #endif /* DHD_RX_CHAINING */ static void dhd_prot_ring_clear(msgbuf_ring_t* ring) { uint16 size; DHD_TRACE(("%s\n", __FUNCTION__)); size = ring->ringmem->max_item * ring->ringmem->len_items; 
ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0); OSL_CACHE_INV((void *) ring->ring_base.va, size); bzero(ring->ring_base.va, size); OSL_CACHE_FLUSH((void *) ring->ring_base.va, size); bzero(ring->ringstate, sizeof(*ring->ringstate)); } void dhd_prot_clear(dhd_pub_t *dhd) { struct dhd_prot *prot = dhd->prot; DHD_TRACE(("%s\n", __FUNCTION__)); if (prot == NULL) return; if (prot->h2dring_txp_subn) dhd_prot_ring_clear(prot->h2dring_txp_subn); if (prot->h2dring_rxp_subn) dhd_prot_ring_clear(prot->h2dring_rxp_subn); if (prot->h2dring_ctrl_subn) dhd_prot_ring_clear(prot->h2dring_ctrl_subn); if (prot->d2hring_tx_cpln) dhd_prot_ring_clear(prot->d2hring_tx_cpln); if (prot->d2hring_rx_cpln) dhd_prot_ring_clear(prot->d2hring_rx_cpln); if (prot->d2hring_ctrl_cpln) dhd_prot_ring_clear(prot->d2hring_ctrl_cpln); if (prot->retbuf.va) { OSL_CACHE_INV((void *) prot->retbuf.va, IOCT_RETBUF_SIZE); bzero(prot->retbuf.va, IOCT_RETBUF_SIZE); OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE); } if (prot->ioctbuf.va) { OSL_CACHE_INV((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE); bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE); OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE); } if (prot->d2h_dma_scratch_buf.va) { OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN); } if (prot->h2d_dma_readindx_buf.va) { OSL_CACHE_INV((void *)prot->h2d_dma_readindx_buf.va, prot->h2d_dma_readindx_buf_len); bzero(prot->h2d_dma_readindx_buf.va, prot->h2d_dma_readindx_buf_len); OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, prot->h2d_dma_readindx_buf_len); } if (prot->h2d_dma_writeindx_buf.va) { OSL_CACHE_INV((void *)prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len); bzero(prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len); OSL_CACHE_FLUSH((void 
*)prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len); } if (prot->d2h_dma_readindx_buf.va) { OSL_CACHE_INV((void *)prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len); bzero(prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len); OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len); } if (prot->d2h_dma_writeindx_buf.va) { OSL_CACHE_INV((void *)prot->d2h_dma_writeindx_buf.va, prot->d2h_dma_writeindx_buf_len); bzero(prot->d2h_dma_writeindx_buf.va, prot->d2h_dma_writeindx_buf_len); OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, prot->d2h_dma_writeindx_buf_len); } prot->rx_metadata_offset = 0; prot->tx_metadata_offset = 0; prot->rxbufpost = 0; prot->cur_event_bufs_posted = 0; prot->cur_ioctlresp_bufs_posted = 0; prot->active_tx_count = 0; prot->data_seq_no = 0; prot->ioctl_seq_no = 0; prot->pending = 0; prot->lastcmd = 0; prot->ioctl_trans_id = 1; /* dhd_flow_rings_init is located at dhd_bus_start, * so when stopping bus, flowrings shall be deleted */ dhd_flow_rings_deinit(dhd); NATIVE_TO_PKTID_CLEAR(prot->pktid_map_handle); PKTCLEAR_STATIC(dhd->osh); }
gpl-2.0
janimo/android_kernel_huawei_u9200
drivers/base/core.c
530
47098
/* * drivers/base/core.c - core driver model code (device registration, etc) * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2006 Novell, Inc. * * This file is released under the GPLv2 * */ #include <linux/device.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kdev_t.h> #include <linux/notifier.h> #include <linux/genhd.h> #include <linux/kallsyms.h> #include <linux/mutex.h> #include <linux/async.h> #include "base.h" #include "power/power.h" #ifdef CONFIG_SYSFS_DEPRECATED #ifdef CONFIG_SYSFS_DEPRECATED_V2 long sysfs_deprecated = 1; #else long sysfs_deprecated = 0; #endif static __init int sysfs_deprecated_setup(char *arg) { return strict_strtol(arg, 10, &sysfs_deprecated); } early_param("sysfs.deprecated", sysfs_deprecated_setup); #endif int (*platform_notify)(struct device *dev) = NULL; int (*platform_notify_remove)(struct device *dev) = NULL; static struct kobject *dev_kobj; struct kobject *sysfs_dev_char_kobj; struct kobject *sysfs_dev_block_kobj; #ifdef CONFIG_BLOCK static inline int device_is_not_partition(struct device *dev) { return !(dev->type == &part_type); } #else static inline int device_is_not_partition(struct device *dev) { return 1; } #endif /** * dev_driver_string - Return a device's driver name, if at all possible * @dev: struct device to get the name of * * Will return the device's driver's name if it is bound to a device. If * the device is not bound to a device, it will return the name of the bus * it is attached to. If it is not attached to a bus either, an empty * string will be returned. */ const char *dev_driver_string(const struct device *dev) { struct device_driver *drv; /* dev->driver can change to NULL underneath us because of unbinding, * so be careful about accessing it. 
dev->bus and dev->class should * never change once they are set, so they don't need special care. */ drv = ACCESS_ONCE(dev->driver); return drv ? drv->name : (dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "")); } EXPORT_SYMBOL(dev_driver_string); #define to_dev(obj) container_of(obj, struct device, kobj) #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dev_attr = to_dev_attr(attr); struct device *dev = to_dev(kobj); ssize_t ret = -EIO; if (dev_attr->show) ret = dev_attr->show(dev, dev_attr, buf); if (ret >= (ssize_t)PAGE_SIZE) { print_symbol("dev_attr_show: %s returned bad count\n", (unsigned long)dev_attr->show); } return ret; } static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dev_attr = to_dev_attr(attr); struct device *dev = to_dev(kobj); ssize_t ret = -EIO; if (dev_attr->store) ret = dev_attr->store(dev, dev_attr, buf, count); return ret; } static const struct sysfs_ops dev_sysfs_ops = { .show = dev_attr_show, .store = dev_attr_store, }; /** * device_release - free device structure. * @kobj: device's kobject. * * This is called once the reference count for the object * reaches 0. We forward the call to the device's release * method, which should handle actually freeing the structure. 
*/ static void device_release(struct kobject *kobj) { struct device *dev = to_dev(kobj); struct device_private *p = dev->p; if (dev->release) dev->release(dev); else if (dev->type && dev->type->release) dev->type->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); else WARN(1, KERN_ERR "Device '%s' does not have a release() " "function, it is broken and must be fixed.\n", dev_name(dev)); kfree(p); } static const void *device_namespace(struct kobject *kobj) { struct device *dev = to_dev(kobj); const void *ns = NULL; if (dev->class && dev->class->ns_type) ns = dev->class->namespace(dev); return ns; } static struct kobj_type device_ktype = { .release = device_release, .sysfs_ops = &dev_sysfs_ops, .namespace = device_namespace, }; static int dev_uevent_filter(struct kset *kset, struct kobject *kobj) { struct kobj_type *ktype = get_ktype(kobj); if (ktype == &device_ktype) { struct device *dev = to_dev(kobj); if (dev->bus) return 1; if (dev->class) return 1; } return 0; } static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj) { struct device *dev = to_dev(kobj); if (dev->bus) return dev->bus->name; if (dev->class) return dev->class->name; return NULL; } static int dev_uevent(struct kset *kset, struct kobject *kobj, struct kobj_uevent_env *env) { struct device *dev = to_dev(kobj); int retval = 0; /* add device node properties if present */ if (MAJOR(dev->devt)) { const char *tmp; const char *name; mode_t mode = 0; add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); name = device_get_devnode(dev, &mode, &tmp); if (name) { add_uevent_var(env, "DEVNAME=%s", name); kfree(tmp); if (mode) add_uevent_var(env, "DEVMODE=%#o", mode & 0777); } } if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); if (dev->driver) add_uevent_var(env, "DRIVER=%s", dev->driver->name); /* have the bus specific function add its stuff */ if (dev->bus && 
	    dev->bus->uevent) {
		retval = dev->bus->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: bus uevent() returned %d\n",
				 dev_name(dev), __func__, retval);
	}

	/* have the class specific function add its stuff */
	if (dev->class && dev->class->dev_uevent) {
		retval = dev->class->dev_uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: class uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	/* have the device type specific function add its stuff */
	if (dev->type && dev->type->uevent) {
		retval = dev->type->uevent(dev, env);
		if (retval)
			pr_debug("device: '%s': %s: dev_type uevent() "
				 "returned %d\n", dev_name(dev),
				 __func__, retval);
	}

	return retval;
}

static const struct kset_uevent_ops device_uevent_ops = {
	.filter =	dev_uevent_filter,
	.name =		dev_uevent_name,
	.uevent =	dev_uevent,
};

/* "uevent" attribute show(): replay the keys a KOBJ_ADD event would carry */
static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct kobject *top_kobj;
	struct kset *kset;
	struct kobj_uevent_env *env = NULL;
	int i;
	size_t count = 0;
	int retval;

	/* search the kset, the device belongs to */
	top_kobj = &dev->kobj;
	while (!top_kobj->kset && top_kobj->parent)
		top_kobj = top_kobj->parent;
	if (!top_kobj->kset)
		goto out;

	kset = top_kobj->kset;
	if (!kset->uevent_ops || !kset->uevent_ops->uevent)
		goto out;

	/* respect filter */
	if (kset->uevent_ops && kset->uevent_ops->filter)
		if (!kset->uevent_ops->filter(kset, &dev->kobj))
			goto out;

	env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;

	/* let the kset specific function add its keys */
	retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
	if (retval)
		goto out;	/* NOTE(review): reports count 0, not retval */

	/* copy keys to file */
	for (i = 0; i < env->envp_idx; i++)
		count += sprintf(&buf[count], "%s\n", env->envp[i]);
out:
	kfree(env);
	return count;
}

/* "uevent" attribute store(): let userspace trigger a synthetic uevent */
static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buf, count, &action) == 0)
		kobject_uevent(&dev->kobj, action);
	else
		dev_err(dev, "uevent: unknown action-string\n");
	return count;
}

static struct device_attribute uevent_attr =
	__ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent);

/* create each attribute of a NULL-terminated array; roll back on failure */
static int device_add_attributes(struct device *dev,
				 struct device_attribute *attrs)
{
	int error = 0;
	int i;

	if (attrs) {
		for (i = 0; attr_name(attrs[i]); i++) {
			error = device_create_file(dev, &attrs[i]);
			if (error)
				break;
		}
		if (error)
			while (--i >= 0)
				device_remove_file(dev, &attrs[i]);
	}
	return error;
}

/* remove every attribute of a NULL-terminated array */
static void device_remove_attributes(struct device *dev,
				     struct device_attribute *attrs)
{
	int i;

	if (attrs)
		for (i = 0; attr_name(attrs[i]); i++)
			device_remove_file(dev, &attrs[i]);
}

/* same as device_add_attributes(), but for binary attributes */
static int device_add_bin_attributes(struct device *dev,
				     struct bin_attribute *attrs)
{
	int error = 0;
	int i;

	if (attrs) {
		for (i = 0; attr_name(attrs[i]); i++) {
			error = device_create_bin_file(dev, &attrs[i]);
			if (error)
				break;
		}
		if (error)
			while (--i >= 0)
				device_remove_bin_file(dev, &attrs[i]);
	}
	return error;
}

static void device_remove_bin_attributes(struct device *dev,
					 struct bin_attribute *attrs)
{
	int i;

	if (attrs)
		for (i = 0; attr_name(attrs[i]); i++)
			device_remove_bin_file(dev, &attrs[i]);
}

/* create each attribute group of a NULL-terminated array; roll back on failure */
static int device_add_groups(struct device *dev,
			     const struct attribute_group **groups)
{
	int error = 0;
	int i;

	if (groups) {
		for (i = 0; groups[i]; i++) {
			error = sysfs_create_group(&dev->kobj, groups[i]);
			if (error) {
				while (--i >= 0)
					sysfs_remove_group(&dev->kobj,
							   groups[i]);
				break;
			}
		}
	}
	return error;
}

static void device_remove_groups(struct device *dev,
				 const struct attribute_group **groups)
{
	int i;

	if (groups)
		for (i = 0; groups[i]; i++)
			sysfs_remove_group(&dev->kobj, groups[i]);
}

/* add class, type and device attribute sets, unwinding on any failure */
static int device_add_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;
	int error;

	if (class) {
		error = device_add_attributes(dev, class->dev_attrs);
		if (error)
			return error;
		error = device_add_bin_attributes(dev, class->dev_bin_attrs);
		if (error)
			goto
 err_remove_class_attrs;
	}

	if (type) {
		error = device_add_groups(dev, type->groups);
		if (error)
			goto err_remove_class_bin_attrs;
	}

	error = device_add_groups(dev, dev->groups);
	if (error)
		goto err_remove_type_groups;

	return 0;

 err_remove_type_groups:
	if (type)
		device_remove_groups(dev, type->groups);
 err_remove_class_bin_attrs:
	if (class)
		device_remove_bin_attributes(dev, class->dev_bin_attrs);
 err_remove_class_attrs:
	if (class)
		device_remove_attributes(dev, class->dev_attrs);

	return error;
}

/* remove everything device_add_attrs() created, in reverse order */
static void device_remove_attrs(struct device *dev)
{
	struct class *class = dev->class;
	const struct device_type *type = dev->type;

	device_remove_groups(dev, dev->groups);

	if (type)
		device_remove_groups(dev, type->groups);

	if (class) {
		device_remove_attributes(dev, class->dev_attrs);
		device_remove_bin_attributes(dev, class->dev_bin_attrs);
	}
}

/* "dev" attribute: prints the device's major:minor number */
static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return print_dev_t(buf, dev->devt);
}

static struct device_attribute devt_attr =
	__ATTR(dev, S_IRUGO, show_dev, NULL);

/* kset to create /sys/devices/ */
struct kset *devices_kset;

/**
 * device_create_file - create sysfs attribute file for device.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
int device_create_file(struct device *dev,
		       const struct device_attribute *attr)
{
	int error = 0;
	if (dev)
		error = sysfs_create_file(&dev->kobj, &attr->attr);
	return error;
}

/**
 * device_remove_file - remove sysfs attribute file.
 * @dev: device.
 * @attr: device attribute descriptor.
 */
void device_remove_file(struct device *dev,
			const struct device_attribute *attr)
{
	if (dev)
		sysfs_remove_file(&dev->kobj, &attr->attr);
}

/**
 * device_create_bin_file - create sysfs binary attribute file for device.
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
int device_create_bin_file(struct device *dev,
			   const struct bin_attribute *attr)
{
	int error = -EINVAL;
	if (dev)
		error = sysfs_create_bin_file(&dev->kobj, attr);
	return error;
}
EXPORT_SYMBOL_GPL(device_create_bin_file);

/**
 * device_remove_bin_file - remove sysfs binary attribute file
 * @dev: device.
 * @attr: device binary attribute descriptor.
 */
void device_remove_bin_file(struct device *dev,
			    const struct bin_attribute *attr)
{
	if (dev)
		sysfs_remove_bin_file(&dev->kobj, attr);
}
EXPORT_SYMBOL_GPL(device_remove_bin_file);

/**
 * device_schedule_callback_owner - helper to schedule a callback for a device
 * @dev: device.
 * @func: callback function to invoke later.
 * @owner: module owning the callback routine
 *
 * Attribute methods must not unregister themselves or their parent device
 * (which would amount to the same thing).  Attempts to do so will deadlock,
 * since unregistration is mutually exclusive with driver callbacks.
 *
 * Instead methods can call this routine, which will attempt to allocate
 * and schedule a workqueue request to call back @func with @dev as its
 * argument in the workqueue's process context.  @dev will be pinned until
 * @func returns.
 *
 * This routine is usually called via the inline device_schedule_callback(),
 * which automatically sets @owner to THIS_MODULE.
 *
 * Returns 0 if the request was submitted, -ENOMEM if storage could not
 * be allocated, -ENODEV if a reference to @owner isn't available.
 *
 * NOTE: This routine won't work if CONFIG_SYSFS isn't set!  It uses an
 * underlying sysfs routine (since it is intended for use by attribute
 * methods), and if sysfs isn't available you'll get nothing but -ENOSYS.
 */
int device_schedule_callback_owner(struct device *dev,
		void (*func)(struct device *), struct module *owner)
{
	return sysfs_schedule_callback(&dev->kobj,
			(void (*)(void *)) func, dev, owner);
}
EXPORT_SYMBOL_GPL(device_schedule_callback_owner);

/* pin a child device while it sits on its parent's children klist */
static void klist_children_get(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	get_device(dev);
}

/* drop the reference taken by klist_children_get() */
static void klist_children_put(struct klist_node *n)
{
	struct device_private *p = to_device_private_parent(n);
	struct device *dev = p->device;

	put_device(dev);
}

/**
 * device_initialize - init device structure.
 * @dev: device.
 *
 * This prepares the device for use by other layers by initializing
 * its fields.
 * It is the first half of device_register(), if called by
 * that function, though it can also be called separately, so one
 * may use @dev's fields. In particular, get_device()/put_device()
 * may be used for reference counting of @dev after calling this
 * function.
 *
 * NOTE: Use put_device() to give up your reference instead of freeing
 * @dev directly once you have called this function.
 */
void device_initialize(struct device *dev)
{
	dev->kobj.kset = devices_kset;
	kobject_init(&dev->kobj, &device_ktype);
	INIT_LIST_HEAD(&dev->dma_pools);
	mutex_init(&dev->mutex);
	lockdep_set_novalidate_class(&dev->mutex);
	spin_lock_init(&dev->devres_lock);
	INIT_LIST_HEAD(&dev->devres_head);
	device_pm_init(dev);
	set_dev_node(dev, -1);
}

/* parent kobject for class devices with no parent: /sys/devices/virtual */
static struct kobject *virtual_device_parent(struct device *dev)
{
	static struct kobject *virtual_dir = NULL;

	if (!virtual_dir)
		virtual_dir = kobject_create_and_add("virtual",
						     &devices_kset->kobj);

	return virtual_dir;
}

/* "glue" directory grouping class devices under a non-class parent */
struct class_dir {
	struct kobject kobj;
	struct class *class;
};

#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)

static void class_dir_release(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	kfree(dir);
}

/* children of a glue dir inherit the class's namespace type */
static const
struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
{
	struct class_dir *dir = to_class_dir(kobj);
	return dir->class->ns_type;
}

static struct kobj_type class_dir_ktype = {
	.release	= class_dir_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.child_ns_type	= class_dir_child_ns_type
};

/* allocate a glue dir and add it below @parent_kobj; NULL on failure */
static struct kobject *
class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
{
	struct class_dir *dir;
	int retval;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return NULL;

	dir->class = class;
	kobject_init(&dir->kobj, &class_dir_ktype);

	dir->kobj.kset = &class->p->glue_dirs;

	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
	if (retval < 0) {
		kobject_put(&dir->kobj);
		return NULL;
	}
	return &dir->kobj;
}

/* pick (or create) the sysfs parent kobject for a device being added */
static struct kobject *get_device_parent(struct device *dev,
					 struct device *parent)
{
	if (dev->class) {
		static DEFINE_MUTEX(gdp_mutex);
		struct kobject *kobj = NULL;
		struct kobject *parent_kobj;
		struct kobject *k;

#ifdef CONFIG_BLOCK
		/* block disks show up in /sys/block */
		if (sysfs_deprecated && dev->class == &block_class) {
			if (parent && parent->class == &block_class)
				return &parent->kobj;
			return &block_class.p->subsys.kobj;
		}
#endif

		/*
		 * If we have no parent, we live in "virtual".
		 * Class-devices with a non class-device as parent, live
		 * in a "glue" directory to prevent namespace collisions.
		 */
		if (parent == NULL)
			parent_kobj = virtual_device_parent(dev);
		else if (parent->class && !dev->class->ns_type)
			return &parent->kobj;
		else
			parent_kobj = &parent->kobj;

		mutex_lock(&gdp_mutex);

		/* find our class-directory at the parent and reference it */
		spin_lock(&dev->class->p->glue_dirs.list_lock);
		list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
			if (k->parent == parent_kobj) {
				kobj = kobject_get(k);
				break;
			}
		spin_unlock(&dev->class->p->glue_dirs.list_lock);
		if (kobj) {
			mutex_unlock(&gdp_mutex);
			return kobj;
		}

		/* or create a new class-directory at the parent device */
		k = class_dir_create_and_add(dev->class, parent_kobj);
		/* do not emit an uevent for this simple "glue" directory */
		mutex_unlock(&gdp_mutex);
		return k;
	}

	if (parent)
		return &parent->kobj;
	return NULL;
}

/* drop the glue-directory reference taken in get_device_parent() */
static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
{
	/* see if we live in a "glue" directory */
	if (!glue_dir || !dev->class ||
	    glue_dir->kset != &dev->class->p->glue_dirs)
		return;

	kobject_put(glue_dir);
}

static void cleanup_device_parent(struct device *dev)
{
	cleanup_glue_dir(dev, dev->kobj.parent);
}

/* resolve and install the sysfs parent before kobject_add() */
static void setup_parent(struct device *dev, struct device *parent)
{
	struct kobject *kobj;
	kobj = get_device_parent(dev, parent);
	if (kobj)
		dev->kobj.parent = kobj;
}

/* create the "subsystem"/"device" links and the class-side name link */
static int device_add_class_symlinks(struct device *dev)
{
	int error;

	if (!dev->class)
		return 0;

	error = sysfs_create_link(&dev->kobj,
				  &dev->class->p->subsys.kobj,
				  "subsystem");
	if (error)
		goto out;

	if (dev->parent && device_is_not_partition(dev)) {
		error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
					  "device");
		if (error)
			goto out_subsys;
	}

#ifdef CONFIG_BLOCK
	/* /sys/block has directories and does not need symlinks */
	if (sysfs_deprecated && dev->class == &block_class)
		return 0;
#endif

	/* link in the class directory pointing
	   to the device */
	error = sysfs_create_link(&dev->class->p->subsys.kobj,
				  &dev->kobj, dev_name(dev));
	if (error)
		goto out_device;

	return 0;

out_device:
	sysfs_remove_link(&dev->kobj, "device");

out_subsys:
	sysfs_remove_link(&dev->kobj, "subsystem");
out:
	return error;
}

/* undo device_add_class_symlinks(), mirroring its conditions */
static void device_remove_class_symlinks(struct device *dev)
{
	if (!dev->class)
		return;

	if (dev->parent && device_is_not_partition(dev))
		sysfs_remove_link(&dev->kobj, "device");
	sysfs_remove_link(&dev->kobj, "subsystem");
#ifdef CONFIG_BLOCK
	if (sysfs_deprecated && dev->class == &block_class)
		return;
#endif
	sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
}

/**
 * dev_set_name - set a device name
 * @dev: device
 * @fmt: format string for the device's name
 */
int dev_set_name(struct device *dev, const char *fmt, ...)
{
	va_list vargs;
	int err;

	va_start(vargs, fmt);
	err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	return err;
}
EXPORT_SYMBOL_GPL(dev_set_name);

/**
 * device_to_dev_kobj - select a /sys/dev/ directory for the device
 * @dev: device
 *
 * By default we select char/ for new entries.  Setting class->dev_kobj
 * to NULL prevents an entry from being created.  class->dev_kobj must
 * be set (or cleared) before any devices are registered to the class
 * otherwise device_create_sys_dev_entry() and
 * device_remove_sys_dev_entry() will disagree about the presence
 * of the link.
 */
static struct kobject *device_to_dev_kobj(struct device *dev)
{
	struct kobject *kobj;

	if (dev->class)
		kobj = dev->class->dev_kobj;
	else
		kobj = sysfs_dev_char_kobj;

	return kobj;
}

/* create the /sys/dev/<char|block>/<maj>:<min> symlink for this device */
static int device_create_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	int error = 0;
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		error = sysfs_create_link(kobj, &dev->kobj, devt_str);
	}

	return error;
}

/* remove the symlink created by device_create_sys_dev_entry() */
static void device_remove_sys_dev_entry(struct device *dev)
{
	struct kobject *kobj = device_to_dev_kobj(dev);
	char devt_str[15];

	if (kobj) {
		format_dev_t(devt_str, dev->devt);
		sysfs_remove_link(kobj, devt_str);
	}
}

/* allocate and initialize dev->p, including the children klist */
int device_private_init(struct device *dev)
{
	dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
	if (!dev->p)
		return -ENOMEM;
	dev->p->device = dev;
	klist_init(&dev->p->klist_children,
		   klist_children_get, klist_children_put);
	return 0;
}

/**
 * device_add - add device to device hierarchy.
 * @dev: device.
 *
 * This is part 2 of device_register(), though may be called
 * separately _iff_ device_initialize() has been called separately.
 *
 * This adds @dev to the kobject hierarchy via kobject_add(), adds it
 * to the global and sibling lists for the device, then
 * adds it to the other relevant subsystems of the driver model.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up your
 * reference instead.
 */
int device_add(struct device *dev)
{
	struct device *parent = NULL;
	struct class_interface *class_intf;
	int error = -EINVAL;

	dev = get_device(dev);
	if (!dev)
		goto done;

	if (!dev->p) {
		error = device_private_init(dev);
		if (error)
			goto done;
	}

	/*
	 * for statically allocated devices, which should all be converted
	 * some day, we need to initialize the name.
	 * We prevent reading back
	 * the name, and force the use of dev_name()
	 */
	if (dev->init_name) {
		dev_set_name(dev, "%s", dev->init_name);
		dev->init_name = NULL;
	}

	if (!dev_name(dev)) {
		error = -EINVAL;
		goto name_error;
	}

	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

	parent = get_device(dev->parent);
	setup_parent(dev, parent);

	/* use parent numa_node */
	if (parent)
		set_dev_node(dev, dev_to_node(parent));

	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
	if (error)
		goto Error;

	/* notify platform of device entry */
	if (platform_notify)
		platform_notify(dev);

	error = device_create_file(dev, &uevent_attr);
	if (error)
		goto attrError;

	if (MAJOR(dev->devt)) {
		error = device_create_file(dev, &devt_attr);
		if (error)
			goto ueventattrError;

		error = device_create_sys_dev_entry(dev);
		if (error)
			goto devtattrError;

		devtmpfs_create_node(dev);
	}

	error = device_add_class_symlinks(dev);
	if (error)
		goto SymlinkError;
	error = device_add_attrs(dev);
	if (error)
		goto AttrsError;
	error = bus_add_device(dev);
	if (error)
		goto BusError;
	error = dpm_sysfs_add(dev);
	if (error)
		goto DPMError;
	device_pm_add(dev);

	/* Notify clients of device addition.  This call must come
	 * after dpm_sysf_add() and before kobject_uevent().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_ADD_DEVICE, dev);

	kobject_uevent(&dev->kobj, KOBJ_ADD);
	bus_probe_device(dev);
	if (parent)
		klist_add_tail(&dev->p->knode_parent,
			       &parent->p->klist_children);

	if (dev->class) {
		mutex_lock(&dev->class->p->class_mutex);
		/* tie the class to the device */
		klist_add_tail(&dev->knode_class,
			       &dev->class->p->klist_devices);

		/* notify any interfaces that the device is here */
		list_for_each_entry(class_intf,
				    &dev->class->p->class_interfaces, node)
			if (class_intf->add_dev)
				class_intf->add_dev(dev, class_intf);
		mutex_unlock(&dev->class->p->class_mutex);
	}
done:
	put_device(dev);
	return error;
	/* error unwinding: release everything acquired above, in reverse order */
 DPMError:
	bus_remove_device(dev);
 BusError:
	device_remove_attrs(dev);
 AttrsError:
	device_remove_class_symlinks(dev);
 SymlinkError:
	if (MAJOR(dev->devt))
		devtmpfs_delete_node(dev);
	if (MAJOR(dev->devt))
		device_remove_sys_dev_entry(dev);
 devtattrError:
	if (MAJOR(dev->devt))
		device_remove_file(dev, &devt_attr);
 ueventattrError:
	device_remove_file(dev, &uevent_attr);
 attrError:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	kobject_del(&dev->kobj);
 Error:
	cleanup_device_parent(dev);
	if (parent)
		put_device(parent);
name_error:
	kfree(dev->p);
	dev->p = NULL;
	goto done;
}

/**
 * device_register - register a device with the system.
 * @dev: pointer to the device structure
 *
 * This happens in two clean steps - initialize the device
 * and add it to the system. The two steps can be called
 * separately, but this is the easiest and most common.
 * I.e. you should only call the two helpers separately if
 * have a clearly defined need to use and refcount the device
 * before it is added to the hierarchy.
 *
 * NOTE: _Never_ directly free @dev after calling this function, even
 * if it returned an error! Always use put_device() to give up the
 * reference initialized in this function instead.
 */
int device_register(struct device *dev)
{
	device_initialize(dev);
	return device_add(dev);
}

/**
 * get_device - increment reference count for device.
 * @dev: device.
 *
 * This simply forwards the call to kobject_get(), though
 * we do take care to provide for the case that we get a NULL
 * pointer passed in.
 */
struct device *get_device(struct device *dev)
{
	return dev ? to_dev(kobject_get(&dev->kobj)) : NULL;
}

/**
 * put_device - decrement reference count.
 * @dev: device in question.
 */
void put_device(struct device *dev)
{
	/* might_sleep(); */
	if (dev)
		kobject_put(&dev->kobj);
}

/**
 * device_del - delete device from system.
 * @dev: device.
 *
 * This is the first part of the device unregistration
 * sequence. This removes the device from the lists we control
 * from here, has it removed from the other driver model
 * subsystems it was added to in device_add(), and removes it
 * from the kobject hierarchy.
 *
 * NOTE: this should be called manually _iff_ device_add() was
 * also called manually.
 */
void device_del(struct device *dev)
{
	struct device *parent = dev->parent;
	struct class_interface *class_intf;

	/* Notify clients of device removal.  This call must come
	 * before dpm_sysfs_remove().
	 */
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DEL_DEVICE, dev);
	device_pm_remove(dev);
	dpm_sysfs_remove(dev);
	if (parent)
		klist_del(&dev->p->knode_parent);
	if (MAJOR(dev->devt)) {
		devtmpfs_delete_node(dev);
		device_remove_sys_dev_entry(dev);
		device_remove_file(dev, &devt_attr);
	}
	if (dev->class) {
		device_remove_class_symlinks(dev);

		mutex_lock(&dev->class->p->class_mutex);
		/* notify any interfaces that the device is now gone */
		list_for_each_entry(class_intf,
				    &dev->class->p->class_interfaces, node)
			if (class_intf->remove_dev)
				class_intf->remove_dev(dev, class_intf);
		/* remove the device from the class list */
		klist_del(&dev->knode_class);
		mutex_unlock(&dev->class->p->class_mutex);
	}
	device_remove_file(dev, &uevent_attr);
	device_remove_attrs(dev);
	bus_remove_device(dev);

	/*
	 * Some platform devices are driven without driver attached
	 * and managed resources may have been acquired.  Make sure
	 * all resources are released.
	 */
	devres_release_all(dev);

	/* Notify the platform of the removal, in case they
	 * need to do anything...
	 */
	if (platform_notify_remove)
		platform_notify_remove(dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
	cleanup_device_parent(dev);
	kobject_del(&dev->kobj);
	put_device(parent);
}

/**
 * device_unregister - unregister device from system.
 * @dev: device going away.
 *
 * We do this in two parts, like we do device_register(). First,
 * we remove it from all the subsystems with device_del(), then
 * we decrement the reference count via put_device(). If that
 * is the final reference count, the device will be cleaned up
 * via device_release() above. Otherwise, the structure will
 * stick around until the final reference to the device is dropped.
 */
void device_unregister(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	device_del(dev);
	put_device(dev);
}

/* advance a children-klist iterator and return the next child device */
static struct device *next_device(struct klist_iter *i)
{
	struct klist_node *n = klist_next(i);
	struct device *dev = NULL;
	struct device_private *p;

	if (n) {
		p = to_device_private_parent(n);
		dev = p->device;
	}
	return dev;
}

/**
 * device_get_devnode - path of device node file
 * @dev: device
 * @mode: returned file access mode
 * @tmp: possibly allocated string
 *
 * Return the relative path of a possible device node.
 * Non-default names may need to allocate a memory to compose
 * a name. This memory is returned in tmp and needs to be
 * freed by the caller.
 */
const char *device_get_devnode(struct device *dev,
			       mode_t *mode, const char **tmp)
{
	char *s;

	*tmp = NULL;

	/* the device type may provide a specific name */
	if (dev->type && dev->type->devnode)
		*tmp = dev->type->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* the class may provide a specific name */
	if (dev->class && dev->class->devnode)
		*tmp = dev->class->devnode(dev, mode);
	if (*tmp)
		return *tmp;

	/* return name without allocation, tmp == NULL */
	if (strchr(dev_name(dev), '!') == NULL)
		return dev_name(dev);

	/* replace '!' in the name with '/' */
	*tmp = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!*tmp)
		return NULL;
	while ((s = strchr(*tmp, '!')))
		s[0] = '/';
	return *tmp;
}

/**
 * device_for_each_child - device child iterator.
 * @parent: parent struct device.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over @parent's child devices, and call @fn for each,
 * passing it @data.
 *
 * We check the return of @fn each time. If it returns anything
 * other than 0, we break out and return that value.
*/ int device_for_each_child(struct device *parent, void *data, int (*fn)(struct device *dev, void *data)) { struct klist_iter i; struct device *child; int error = 0; if (!parent->p) return 0; klist_iter_init(&parent->p->klist_children, &i); while ((child = next_device(&i)) && !error) error = fn(child, data); klist_iter_exit(&i); return error; } /** * device_find_child - device iterator for locating a particular device. * @parent: parent struct device * @data: Data to pass to match function * @match: Callback function to check device * * This is similar to the device_for_each_child() function above, but it * returns a reference to a device that is 'found' for later use, as * determined by the @match callback. * * The callback should return 0 if the device doesn't match and non-zero * if it does. If the callback returns non-zero and a reference to the * current device can be obtained, this function will return to the caller * and not iterate over any more devices. */ struct device *device_find_child(struct device *parent, void *data, int (*match)(struct device *dev, void *data)) { struct klist_iter i; struct device *child; if (!parent) return NULL; klist_iter_init(&parent->p->klist_children, &i); while ((child = next_device(&i))) if (match(child, data) && get_device(child)) break; klist_iter_exit(&i); return child; } int __init devices_init(void) { devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); if (!devices_kset) return -ENOMEM; dev_kobj = kobject_create_and_add("dev", NULL); if (!dev_kobj) goto dev_kobj_err; sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); if (!sysfs_dev_block_kobj) goto block_kobj_err; sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); if (!sysfs_dev_char_kobj) goto char_kobj_err; return 0; char_kobj_err: kobject_put(sysfs_dev_block_kobj); block_kobj_err: kobject_put(dev_kobj); dev_kobj_err: kset_unregister(devices_kset); return -ENOMEM; } EXPORT_SYMBOL_GPL(device_for_each_child); 
EXPORT_SYMBOL_GPL(device_find_child); EXPORT_SYMBOL_GPL(device_initialize); EXPORT_SYMBOL_GPL(device_add); EXPORT_SYMBOL_GPL(device_register); EXPORT_SYMBOL_GPL(device_del); EXPORT_SYMBOL_GPL(device_unregister); EXPORT_SYMBOL_GPL(get_device); EXPORT_SYMBOL_GPL(put_device); EXPORT_SYMBOL_GPL(device_create_file); EXPORT_SYMBOL_GPL(device_remove_file); struct root_device { struct device dev; struct module *owner; }; inline struct root_device *to_root_device(struct device *d) { return container_of(d, struct root_device, dev); } static void root_device_release(struct device *dev) { kfree(to_root_device(dev)); } /** * __root_device_register - allocate and register a root device * @name: root device name * @owner: owner module of the root device, usually THIS_MODULE * * This function allocates a root device and registers it * using device_register(). In order to free the returned * device, use root_device_unregister(). * * Root devices are dummy devices which allow other devices * to be grouped under /sys/devices. Use this function to * allocate a root device and then use it as the parent of * any device which should appear under /sys/devices/{name} * * The /sys/devices/{name} directory will also contain a * 'module' symlink which points to the @owner directory * in sysfs. * * Returns &struct device pointer on success, or ERR_PTR() on error. * * Note: You probably want to use root_device_register(). 
 */
struct device *__root_device_register(const char *name, struct module *owner)
{
	struct root_device *root;
	int err = -ENOMEM;

	root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
	if (!root)
		return ERR_PTR(err);

	err = dev_set_name(&root->dev, "%s", name);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	root->dev.release = root_device_release;

	err = device_register(&root->dev);
	if (err) {
		/* dropping our reference runs root_device_release(), freeing root */
		put_device(&root->dev);
		return ERR_PTR(err);
	}

#ifdef CONFIG_MODULES	/* gotta find a "cleaner" way to do this */
	if (owner) {
		struct module_kobject *mk = &owner->mkobj;

		err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
		if (err) {
			device_unregister(&root->dev);
			return ERR_PTR(err);
		}
		root->owner = owner;
	}
#endif

	return &root->dev;
}
EXPORT_SYMBOL_GPL(__root_device_register);

/**
 * root_device_unregister - unregister and free a root device
 * @dev: device going away
 *
 * This function unregisters and cleans up a device that was created by
 * root_device_register().
 */
void root_device_unregister(struct device *dev)
{
	struct root_device *root = to_root_device(dev);

	if (root->owner)
		sysfs_remove_link(&root->dev.kobj, "module");

	device_unregister(dev);
}
EXPORT_SYMBOL_GPL(root_device_unregister);


/* release callback for devices allocated by device_create_vargs() */
static void device_create_release(struct device *dev)
{
	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
	kfree(dev);
}

/**
 * device_create_vargs - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: string for the device's name
 * @args: va_list for the device's name
 *
 * This function can be used by char device classes.  A struct device
 * will be created in sysfs, registered to the specified class.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create_vargs(struct class *class, struct device *parent,
				   dev_t devt, void *drvdata, const char *fmt,
				   va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	if (class == NULL || IS_ERR(class))
		goto error;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	dev->devt = devt;
	dev->class = class;
	dev->parent = parent;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_register(dev);
	if (retval)
		goto error;

	return dev;

error:
	/* put_device(NULL) is safe, so this covers the pre-allocation failure too */
	put_device(dev);
	return ERR_PTR(retval);
}
EXPORT_SYMBOL_GPL(device_create_vargs);

/**
 * device_create - creates a device and registers it with sysfs
 * @class: pointer to the struct class that this device should be registered to
 * @parent: pointer to the parent struct device of this new device, if any
 * @devt: the dev_t for the char device to be added
 * @drvdata: the data to be added to the device for callbacks
 * @fmt: string for the device's name
 *
 * This function can be used by char device classes.  A struct device
 * will be created in sysfs, registered to the specified class.
 *
 * A "dev" file will be created, showing the dev_t for the device, if
 * the dev_t is not 0,0.
 * If a pointer to a parent struct device is passed in, the newly created
 * struct device will be a child of that device in sysfs.
 * The pointer to the struct device will be returned from the call.
 * Any further sysfs files that might be required can be created using this
 * pointer.
 *
 * Returns &struct device pointer on success, or ERR_PTR() on error.
 *
 * Note: the struct class passed to this function must have previously
 * been created with a call to class_create().
 */
struct device *device_create(struct class *class, struct device *parent,
			     dev_t devt, void *drvdata, const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(device_create);

/* class_find_device() match callback comparing dev_t numbers */
static int __match_devt(struct device *dev, void *data)
{
	dev_t *devt = data;

	return dev->devt == *devt;
}

/**
 * device_destroy - removes a device that was created with device_create()
 * @class: pointer to the struct class that this device was registered with
 * @devt: the dev_t of the device that was previously registered
 *
 * This call unregisters and cleans up a device that was created with a
 * call to device_create().
 */
void device_destroy(struct class *class, dev_t devt)
{
	struct device *dev;

	dev = class_find_device(class, NULL, &devt, __match_devt);
	if (dev) {
		/* NOTE(review): put_device() presumably pairs with the
		 * reference class_find_device() takes on the match */
		put_device(dev);
		device_unregister(dev);
	}
}
EXPORT_SYMBOL_GPL(device_destroy);

/**
 * device_rename - renames a device
 * @dev: the pointer to the struct device to be renamed
 * @new_name: the new name of the device
 *
 * It is the responsibility of the caller to provide mutual
 * exclusion between two different calls of device_rename
 * on the same device to ensure that new_name is valid and
 * won't conflict with other devices.
 *
 * Note: Don't call this function.  Currently, the networking layer calls this
 * function, but that will change.  The following text from Kay Sievers offers
 * some insight:
 *
 * Renaming devices is racy at many levels, symlinks and other stuff are not
 * replaced atomically, and you get a "move" uevent, but it's not easy to
 * connect the event to the old and new device.
Device nodes are not renamed at * all, there isn't even support for that in the kernel now. * * In the meantime, during renaming, your target name might be taken by another * driver, creating conflicts. Or the old name is taken directly after you * renamed it -- then you get events for the same DEVPATH, before you even see * the "move" event. It's just a mess, and nothing new should ever rely on * kernel device renaming. Besides that, it's not even implemented now for * other things than (driver-core wise very simple) network devices. * * We are currently about to change network renaming in udev to completely * disallow renaming of devices in the same namespace as the kernel uses, * because we can't solve the problems properly, that arise with swapping names * of multiple interfaces without races. Means, renaming of eth[0-9]* will only * be allowed to some other name than eth[0-9]*, for the aforementioned * reasons. * * Make up a "real" name in the driver before you register anything, or add * some other attributes for userspace to find the device, or use udev to add * symlinks -- but never rename kernel devices later, it's a complete mess. We * don't even want to get into that and try to implement the missing pieces in * the core. We really have other pieces to fix in the driver core mess. 
:) */ int device_rename(struct device *dev, const char *new_name) { char *old_class_name = NULL; char *new_class_name = NULL; char *old_device_name = NULL; int error; dev = get_device(dev); if (!dev) return -EINVAL; pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev), __func__, new_name); old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); if (!old_device_name) { error = -ENOMEM; goto out; } if (dev->class) { error = sysfs_rename_link(&dev->class->p->subsys.kobj, &dev->kobj, old_device_name, new_name); if (error) goto out; } error = kobject_rename(&dev->kobj, new_name); if (error) goto out; out: put_device(dev); kfree(new_class_name); kfree(old_class_name); kfree(old_device_name); return error; } EXPORT_SYMBOL_GPL(device_rename); static int device_move_class_links(struct device *dev, struct device *old_parent, struct device *new_parent) { int error = 0; if (old_parent) sysfs_remove_link(&dev->kobj, "device"); if (new_parent) error = sysfs_create_link(&dev->kobj, &new_parent->kobj, "device"); return error; } /** * device_move - moves a device to a new parent * @dev: the pointer to the struct device to be moved * @new_parent: the new parent of the device (can by NULL) * @dpm_order: how to reorder the dpm_list */ int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order) { int error; struct device *old_parent; struct kobject *new_parent_kobj; dev = get_device(dev); if (!dev) return -EINVAL; device_pm_lock(); new_parent = get_device(new_parent); new_parent_kobj = get_device_parent(dev, new_parent); pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), __func__, new_parent ? 
dev_name(new_parent) : "<NULL>"); error = kobject_move(&dev->kobj, new_parent_kobj); if (error) { cleanup_glue_dir(dev, new_parent_kobj); put_device(new_parent); goto out; } old_parent = dev->parent; dev->parent = new_parent; if (old_parent) klist_remove(&dev->p->knode_parent); if (new_parent) { klist_add_tail(&dev->p->knode_parent, &new_parent->p->klist_children); set_dev_node(dev, dev_to_node(new_parent)); } if (!dev->class) goto out_put; error = device_move_class_links(dev, old_parent, new_parent); if (error) { /* We ignore errors on cleanup since we're hosed anyway... */ device_move_class_links(dev, new_parent, old_parent); if (!kobject_move(&dev->kobj, &old_parent->kobj)) { if (new_parent) klist_remove(&dev->p->knode_parent); dev->parent = old_parent; if (old_parent) { klist_add_tail(&dev->p->knode_parent, &old_parent->p->klist_children); set_dev_node(dev, dev_to_node(old_parent)); } } cleanup_glue_dir(dev, new_parent_kobj); put_device(new_parent); goto out; } switch (dpm_order) { case DPM_ORDER_NONE: break; case DPM_ORDER_DEV_AFTER_PARENT: device_pm_move_after(dev, new_parent); break; case DPM_ORDER_PARENT_BEFORE_DEV: device_pm_move_before(new_parent, dev); break; case DPM_ORDER_DEV_LAST: device_pm_move_last(dev); break; } out_put: put_device(old_parent); out: device_pm_unlock(); put_device(dev); return error; } EXPORT_SYMBOL_GPL(device_move); /** * device_shutdown - call ->shutdown() on each device to shutdown. */ void device_shutdown(void) { struct device *dev; spin_lock(&devices_kset->list_lock); /* * Walk the devices list backward, shutting down each in turn. * Beware that device unplug events may also start pulling * devices offline, even as the system is shutting down. */ while (!list_empty(&devices_kset->list)) { dev = list_entry(devices_kset->list.prev, struct device, kobj.entry); get_device(dev); /* * Make sure the device is off the kset list, in the * event that dev->*->shutdown() doesn't remove it. 
*/ list_del_init(&dev->kobj.entry); spin_unlock(&devices_kset->list_lock); if (dev->bus && dev->bus->shutdown) { dev_dbg(dev, "shutdown\n"); dev->bus->shutdown(dev); } else if (dev->driver && dev->driver->shutdown) { dev_dbg(dev, "shutdown\n"); dev->driver->shutdown(dev); } put_device(dev); spin_lock(&devices_kset->list_lock); } spin_unlock(&devices_kset->list_lock); async_synchronize_full(); } /* * Device logging functions */ #ifdef CONFIG_PRINTK static int __dev_printk(const char *level, const struct device *dev, struct va_format *vaf) { if (!dev) return printk("%s(NULL device *): %pV", level, vaf); return printk("%s%s %s: %pV", level, dev_driver_string(dev), dev_name(dev), vaf); } int dev_printk(const char *level, const struct device *dev, const char *fmt, ...) { struct va_format vaf; va_list args; int r; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; r = __dev_printk(level, dev, &vaf); va_end(args); return r; } EXPORT_SYMBOL(dev_printk); #define define_dev_printk_level(func, kern_level) \ int func(const struct device *dev, const char *fmt, ...) \ { \ struct va_format vaf; \ va_list args; \ int r; \ \ va_start(args, fmt); \ \ vaf.fmt = fmt; \ vaf.va = &args; \ \ r = __dev_printk(kern_level, dev, &vaf); \ va_end(args); \ \ return r; \ } \ EXPORT_SYMBOL(func); define_dev_printk_level(dev_emerg, KERN_EMERG); define_dev_printk_level(dev_alert, KERN_ALERT); define_dev_printk_level(dev_crit, KERN_CRIT); define_dev_printk_level(dev_err, KERN_ERR); define_dev_printk_level(dev_warn, KERN_WARNING); define_dev_printk_level(dev_notice, KERN_NOTICE); define_dev_printk_level(_dev_info, KERN_INFO); #endif
gpl-2.0
bendooks/amlogic-kenel
lib/dump_stack.c
1298
1148
/* * Provide a default dump_stack() function for architectures * which don't implement their own. */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/atomic.h> static void __dump_stack(void) { dump_stack_print_info(KERN_DEFAULT); show_stack(NULL, NULL); } /** * dump_stack - dump the current task information and its stack trace * * Architectures can override this implementation by implementing its own. */ #ifdef CONFIG_SMP static atomic_t dump_lock = ATOMIC_INIT(-1); asmlinkage __visible void dump_stack(void) { int was_locked; int old; int cpu; /* * Permit this cpu to perform nested stack dumps while serialising * against other CPUs */ preempt_disable(); retry: cpu = smp_processor_id(); old = atomic_cmpxchg(&dump_lock, -1, cpu); if (old == -1) { was_locked = 0; } else if (old == cpu) { was_locked = 1; } else { cpu_relax(); goto retry; } __dump_stack(); if (!was_locked) atomic_set(&dump_lock, -1); preempt_enable(); } #else asmlinkage __visible void dump_stack(void) { __dump_stack(); } #endif EXPORT_SYMBOL(dump_stack);
gpl-2.0
Motorhead1991/android_kernel_dell_ventana
sound/isa/azt2320.c
1554
9995
/* card-azt2320.c - driver for Aztech Systems AZT2320 based soundcards. Copyright (C) 1999-2000 by Massimo Piccioni <dafastidio@libero.it> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This driver should provide support for most Aztech AZT2320 based cards. Several AZT2316 chips are also supported/tested, but autoprobe doesn't work: all module option have to be set. No docs available for us at Aztech headquarters !!! Unbelievable ... No other help obtained. Thanks to Rainer Wiesner <rainer.wiesner@01019freenet.de> for the WSS activation method (full-duplex audio!). 
*/ #include <asm/io.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/pnp.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/wss.h> #include <sound/mpu401.h> #include <sound/opl3.h> #define PFX "azt2320: " MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>"); MODULE_DESCRIPTION("Aztech Systems AZT2320"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Aztech Systems,PRO16V}," "{Aztech Systems,AZT2320}," "{Aztech Systems,AZT3300}," "{Aztech Systems,AZT2320}," "{Aztech Systems,AZT3000}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */ static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long wss_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */ static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* Pnp setup */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for azt2320 based soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for azt2320 based soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable azt2320 based soundcard."); struct snd_card_azt2320 { int dev_no; struct pnp_dev *dev; struct pnp_dev *devmpu; struct snd_wss *chip; }; static struct pnp_card_device_id snd_azt2320_pnpids[] = { /* PRO16V */ { .id = "AZT1008", .devs = { { "AZT1008" }, { "AZT2001" }, } }, /* Aztech Sound Galaxy 16 */ { .id = 
"AZT2320", .devs = { { "AZT0001" }, { "AZT0002" }, } }, /* Packard Bell Sound III 336 AM/SP */ { .id = "AZT3000", .devs = { { "AZT1003" }, { "AZT2001" }, } }, /* AT3300 */ { .id = "AZT3002", .devs = { { "AZT1004" }, { "AZT2001" }, } }, /* --- */ { .id = "AZT3005", .devs = { { "AZT1003" }, { "AZT2001" }, } }, /* --- */ { .id = "AZT3011", .devs = { { "AZT1003" }, { "AZT2001" }, } }, { .id = "" } /* end */ }; MODULE_DEVICE_TABLE(pnp_card, snd_azt2320_pnpids); #define DRIVER_NAME "snd-card-azt2320" static int __devinit snd_card_azt2320_pnp(int dev, struct snd_card_azt2320 *acard, struct pnp_card_link *card, const struct pnp_card_device_id *id) { struct pnp_dev *pdev; int err; acard->dev = pnp_request_card_device(card, id->devs[0].id, NULL); if (acard->dev == NULL) return -ENODEV; acard->devmpu = pnp_request_card_device(card, id->devs[1].id, NULL); pdev = acard->dev; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR PFX "AUDIO pnp configure failure\n"); return err; } port[dev] = pnp_port_start(pdev, 0); fm_port[dev] = pnp_port_start(pdev, 1); wss_port[dev] = pnp_port_start(pdev, 2); dma1[dev] = pnp_dma(pdev, 0); dma2[dev] = pnp_dma(pdev, 1); irq[dev] = pnp_irq(pdev, 0); pdev = acard->devmpu; if (pdev != NULL) { err = pnp_activate_dev(pdev); if (err < 0) goto __mpu_error; mpu_port[dev] = pnp_port_start(pdev, 0); mpu_irq[dev] = pnp_irq(pdev, 0); } else { __mpu_error: if (pdev) { pnp_release_card_device(pdev); snd_printk(KERN_ERR PFX "MPU401 pnp configure failure, skipping\n"); } acard->devmpu = NULL; mpu_port[dev] = -1; } return 0; } /* same of snd_sbdsp_command by Jaroslav Kysela */ static int __devinit snd_card_azt2320_command(unsigned long port, unsigned char val) { int i; unsigned long limit; limit = jiffies + HZ / 10; for (i = 50000; i && time_after(limit, jiffies); i--) if (!(inb(port + 0x0c) & 0x80)) { outb(val, port + 0x0c); return 0; } return -EBUSY; } static int __devinit snd_card_azt2320_enable_wss(unsigned long port) { int error; if ((error = 
snd_card_azt2320_command(port, 0x09))) return error; if ((error = snd_card_azt2320_command(port, 0x00))) return error; mdelay(5); return 0; } static int __devinit snd_card_azt2320_probe(int dev, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { int error; struct snd_card *card; struct snd_card_azt2320 *acard; struct snd_wss *chip; struct snd_opl3 *opl3; error = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_card_azt2320), &card); if (error < 0) return error; acard = (struct snd_card_azt2320 *)card->private_data; if ((error = snd_card_azt2320_pnp(dev, acard, pcard, pid))) { snd_card_free(card); return error; } snd_card_set_dev(card, &pcard->card->dev); if ((error = snd_card_azt2320_enable_wss(port[dev]))) { snd_card_free(card); return error; } error = snd_wss_create(card, wss_port[dev], -1, irq[dev], dma1[dev], dma2[dev], WSS_HW_DETECT, 0, &chip); if (error < 0) { snd_card_free(card); return error; } strcpy(card->driver, "AZT2320"); strcpy(card->shortname, "Aztech AZT2320"); sprintf(card->longname, "%s, WSS at 0x%lx, irq %i, dma %i&%i", card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]); error = snd_wss_pcm(chip, 0, NULL); if (error < 0) { snd_card_free(card); return error; } error = snd_wss_mixer(chip); if (error < 0) { snd_card_free(card); return error; } error = snd_wss_timer(chip, 0, NULL); if (error < 0) { snd_card_free(card); return error; } if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) { if (snd_mpu401_uart_new(card, 0, MPU401_HW_AZT2320, mpu_port[dev], 0, mpu_irq[dev], IRQF_DISABLED, NULL) < 0) snd_printk(KERN_ERR PFX "no MPU-401 device at 0x%lx\n", mpu_port[dev]); } if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) { if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2, OPL3_HW_AUTO, 0, &opl3) < 0) { snd_printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx\n", fm_port[dev], fm_port[dev] + 2); } else { if ((error = snd_opl3_timer_new(opl3, 1, 2)) < 0) { snd_card_free(card); return error; 
} if ((error = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); return error; } } } if ((error = snd_card_register(card)) < 0) { snd_card_free(card); return error; } pnp_set_card_drvdata(pcard, card); return 0; } static unsigned int __devinitdata azt2320_devices; static int __devinit snd_azt2320_pnp_detect(struct pnp_card_link *card, const struct pnp_card_device_id *id) { static int dev; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (!enable[dev]) continue; res = snd_card_azt2320_probe(dev, card, id); if (res < 0) return res; dev++; azt2320_devices++; return 0; } return -ENODEV; } static void __devexit snd_azt2320_pnp_remove(struct pnp_card_link * pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } #ifdef CONFIG_PM static int snd_azt2320_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_card_azt2320 *acard = card->private_data; struct snd_wss *chip = acard->chip; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); chip->suspend(chip); return 0; } static int snd_azt2320_pnp_resume(struct pnp_card_link *pcard) { struct snd_card *card = pnp_get_card_drvdata(pcard); struct snd_card_azt2320 *acard = card->private_data; struct snd_wss *chip = acard->chip; chip->resume(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct pnp_card_driver azt2320_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = "azt2320", .id_table = snd_azt2320_pnpids, .probe = snd_azt2320_pnp_detect, .remove = __devexit_p(snd_azt2320_pnp_remove), #ifdef CONFIG_PM .suspend = snd_azt2320_pnp_suspend, .resume = snd_azt2320_pnp_resume, #endif }; static int __init alsa_card_azt2320_init(void) { int err; err = pnp_register_card_driver(&azt2320_pnpc_driver); if (err) return err; if (!azt2320_devices) { pnp_unregister_card_driver(&azt2320_pnpc_driver); #ifdef MODULE snd_printk(KERN_ERR "no AZT2320 based soundcards found\n"); #endif 
return -ENODEV; } return 0; } static void __exit alsa_card_azt2320_exit(void) { pnp_unregister_card_driver(&azt2320_pnpc_driver); } module_init(alsa_card_azt2320_init) module_exit(alsa_card_azt2320_exit)
gpl-2.0
SoftRoCE/rxe-dev
drivers/infiniband/hw/cxgb4/resource.c
1810
12298
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ /* Crude resource management */ #include <linux/spinlock.h> #include <linux/genalloc.h> #include <linux/ratelimit.h> #include "iw_cxgb4.h" static int c4iw_init_qid_table(struct c4iw_rdev *rdev) { u32 i; if (c4iw_id_table_alloc(&rdev->resource.qid_table, rdev->lldi.vr->qp.start, rdev->lldi.vr->qp.size, rdev->lldi.vr->qp.size, 0)) return -ENOMEM; for (i = rdev->lldi.vr->qp.start; i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) if (!(i & rdev->qpmask)) c4iw_id_free(&rdev->resource.qid_table, i); return 0; } /* nr_* must be power of 2 */ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid) { int err = 0; err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1, C4IW_ID_TABLE_F_RANDOM); if (err) goto tpt_err; err = c4iw_init_qid_table(rdev); if (err) goto qid_err; err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0, nr_pdid, 1, 0); if (err) goto pdid_err; return 0; pdid_err: c4iw_id_table_free(&rdev->resource.qid_table); qid_err: c4iw_id_table_free(&rdev->resource.tpt_table); tpt_err: return -ENOMEM; } /* * returns 0 if no resource available */ u32 c4iw_get_resource(struct c4iw_id_table *id_table) { u32 entry; entry = c4iw_id_alloc(id_table); if (entry == (u32)(-1)) return 0; return entry; } void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry) { PDBG("%s entry 0x%x\n", __func__, entry); c4iw_id_free(id_table, entry); } u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; u32 qid; int i; mutex_lock(&uctx->lock); if (!list_empty(&uctx->cqids)) { entry = list_entry(uctx->cqids.next, struct c4iw_qid_list, entry); list_del(&entry->entry); qid = entry->qid; kfree(entry); } else { qid = c4iw_get_resource(&rdev->resource.qid_table); if (!qid) goto out; mutex_lock(&rdev->stats.lock); rdev->stats.qid.cur += rdev->qpmask + 1; mutex_unlock(&rdev->stats.lock); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; 
entry->qid = i; list_add_tail(&entry->entry, &uctx->cqids); } /* * now put the same ids on the qp list since they all * map to the same db/gts page. */ entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = qid; list_add_tail(&entry->entry, &uctx->qpids); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->qpids); } } out: mutex_unlock(&uctx->lock); PDBG("%s qid 0x%x\n", __func__, qid); mutex_lock(&rdev->stats.lock); if (rdev->stats.qid.cur > rdev->stats.qid.max) rdev->stats.qid.max = rdev->stats.qid.cur; mutex_unlock(&rdev->stats.lock); return qid; } void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) return; PDBG("%s qid 0x%x\n", __func__, qid); entry->qid = qid; mutex_lock(&uctx->lock); list_add_tail(&entry->entry, &uctx->cqids); mutex_unlock(&uctx->lock); } u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; u32 qid; int i; mutex_lock(&uctx->lock); if (!list_empty(&uctx->qpids)) { entry = list_entry(uctx->qpids.next, struct c4iw_qid_list, entry); list_del(&entry->entry); qid = entry->qid; kfree(entry); } else { qid = c4iw_get_resource(&rdev->resource.qid_table); if (!qid) { mutex_lock(&rdev->stats.lock); rdev->stats.qid.fail++; mutex_unlock(&rdev->stats.lock); goto out; } mutex_lock(&rdev->stats.lock); rdev->stats.qid.cur += rdev->qpmask + 1; mutex_unlock(&rdev->stats.lock); for (i = qid+1; i & rdev->qpmask; i++) { entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->qpids); } /* * now put the same ids on the cq list since they all * map to the same db/gts page. 
*/ entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = qid; list_add_tail(&entry->entry, &uctx->cqids); for (i = qid; i & rdev->qpmask; i++) { entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) goto out; entry->qid = i; list_add_tail(&entry->entry, &uctx->cqids); } } out: mutex_unlock(&uctx->lock); PDBG("%s qid 0x%x\n", __func__, qid); mutex_lock(&rdev->stats.lock); if (rdev->stats.qid.cur > rdev->stats.qid.max) rdev->stats.qid.max = rdev->stats.qid.cur; mutex_unlock(&rdev->stats.lock); return qid; } void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, struct c4iw_dev_ucontext *uctx) { struct c4iw_qid_list *entry; entry = kmalloc(sizeof *entry, GFP_KERNEL); if (!entry) return; PDBG("%s qid 0x%x\n", __func__, qid); entry->qid = qid; mutex_lock(&uctx->lock); list_add_tail(&entry->entry, &uctx->qpids); mutex_unlock(&uctx->lock); } void c4iw_destroy_resource(struct c4iw_resource *rscp) { c4iw_id_table_free(&rscp->tpt_table); c4iw_id_table_free(&rscp->qid_table); c4iw_id_table_free(&rscp->pdid_table); } /* * PBL Memory Manager. Uses Linux generic allocator. 
*/ #define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); mutex_lock(&rdev->stats.lock); if (addr) { rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); if (rdev->stats.pbl.cur > rdev->stats.pbl.max) rdev->stats.pbl.max = rdev->stats.pbl.cur; } else rdev->stats.pbl.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size); mutex_lock(&rdev->stats.lock); rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); } int c4iw_pblpool_create(struct c4iw_rdev *rdev) { unsigned pbl_start, pbl_chunk, pbl_top; rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); if (!rdev->pbl_pool) return -ENOMEM; pbl_start = rdev->lldi.vr->pbl.start; pbl_chunk = rdev->lldi.vr->pbl.size; pbl_top = pbl_start + pbl_chunk; while (pbl_start < pbl_top) { pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk); if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { PDBG("%s failed to add PBL chunk (%x/%x)\n", __func__, pbl_start, pbl_chunk); if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { printk(KERN_WARNING MOD "Failed to add all PBL chunks (%x/%x)\n", pbl_start, pbl_top - pbl_start); return 0; } pbl_chunk >>= 1; } else { PDBG("%s added PBL chunk (%x/%x)\n", __func__, pbl_start, pbl_chunk); pbl_start += pbl_chunk; } } return 0; } void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->pbl_pool); } /* * RQT Memory Manager. Uses Linux generic allocator. 
*/ #define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); if (!addr) pr_warn_ratelimited(MOD "%s: Out of RQT memory\n", pci_name(rdev->lldi.pdev)); mutex_lock(&rdev->stats.lock); if (addr) { rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); if (rdev->stats.rqt.cur > rdev->stats.rqt.max) rdev->stats.rqt.max = rdev->stats.rqt.cur; } else rdev->stats.rqt.fail++; mutex_unlock(&rdev->stats.lock); return (u32)addr; } void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6); mutex_lock(&rdev->stats.lock); rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); } int c4iw_rqtpool_create(struct c4iw_rdev *rdev) { unsigned rqt_start, rqt_chunk, rqt_top; rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1); if (!rdev->rqt_pool) return -ENOMEM; rqt_start = rdev->lldi.vr->rq.start; rqt_chunk = rdev->lldi.vr->rq.size; rqt_top = rqt_start + rqt_chunk; while (rqt_start < rqt_top) { rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk); if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { PDBG("%s failed to add RQT chunk (%x/%x)\n", __func__, rqt_start, rqt_chunk); if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) { printk(KERN_WARNING MOD "Failed to add all RQT chunks (%x/%x)\n", rqt_start, rqt_top - rqt_start); return 0; } rqt_chunk >>= 1; } else { PDBG("%s added RQT chunk (%x/%x)\n", __func__, rqt_start, rqt_chunk); rqt_start += rqt_chunk; } } return 0; } void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->rqt_pool); } /* * On-Chip QP Memory. 
*/ #define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) { unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); if (addr) { mutex_lock(&rdev->stats.lock); rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max) rdev->stats.ocqp.max = rdev->stats.ocqp.cur; mutex_unlock(&rdev->stats.lock); } return (u32)addr; } void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) { PDBG("%s addr 0x%x size %d\n", __func__, addr, size); mutex_lock(&rdev->stats.lock); rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); mutex_unlock(&rdev->stats.lock); gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); } int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) { unsigned start, chunk, top; rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1); if (!rdev->ocqp_pool) return -ENOMEM; start = rdev->lldi.vr->ocq.start; chunk = rdev->lldi.vr->ocq.size; top = start + chunk; while (start < top) { chunk = min(top - start + 1, chunk); if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { PDBG("%s failed to add OCQP chunk (%x/%x)\n", __func__, start, chunk); if (chunk <= 1024 << MIN_OCQP_SHIFT) { printk(KERN_WARNING MOD "Failed to add all OCQP chunks (%x/%x)\n", start, top - start); return 0; } chunk >>= 1; } else { PDBG("%s added OCQP chunk (%x/%x)\n", __func__, start, chunk); start += chunk; } } return 0; } void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev) { gen_pool_destroy(rdev->ocqp_pool); }
gpl-2.0
niker/elitekernel_oxp
net/802/tr.c
3346
15492
/* * NET3: Token ring device handling subroutines * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Fixes: 3 Feb 97 Paul Norton <pnorton@cts.com> Minor routing fixes. * Added rif table to /proc/net/tr_rif and rif timeout to * /proc/sys/net/token-ring/rif_timeout. * 22 Jun 98 Paul Norton <p.norton@computer.org> Rearranged * tr_header and tr_type_trans to handle passing IPX SNAP and * 802.2 through the correct layers. Eliminated tr_reformat. * */ #include <asm/uaccess.h> #include <asm/system.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/trdevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/net.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/slab.h> #include <net/arp.h> #include <net/net_namespace.h> static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev); static void rif_check_expire(unsigned long dummy); #define TR_SR_DEBUG 0 /* * Each RIF entry we learn is kept this way */ struct rif_cache { unsigned char addr[TR_ALEN]; int iface; __be16 rcf; __be16 rseg[8]; struct rif_cache *next; unsigned long last_used; unsigned char local_ring; }; #define RIF_TABLE_SIZE 32 /* * We hash the RIF cache 32 ways. We do after all have to look it * up a lot. */ static struct rif_cache *rif_table[RIF_TABLE_SIZE]; static DEFINE_SPINLOCK(rif_lock); /* * Garbage disposal timer. 
*/ static struct timer_list rif_timer; static int sysctl_tr_rif_timeout = 60*10*HZ; static inline unsigned long rif_hash(const unsigned char *addr) { unsigned long x; x = addr[0]; x = (x << 2) ^ addr[1]; x = (x << 2) ^ addr[2]; x = (x << 2) ^ addr[3]; x = (x << 2) ^ addr[4]; x = (x << 2) ^ addr[5]; x ^= x >> 8; return x & (RIF_TABLE_SIZE - 1); } /* * Put the headers on a token ring packet. Token ring source routing * makes this a little more exciting than on ethernet. */ static int tr_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { struct trh_hdr *trh; int hdr_len; /* * Add the 802.2 SNAP header if IP as the IPv4/IPv6 code calls * dev->hard_header directly. */ if (type == ETH_P_IP || type == ETH_P_IPV6 || type == ETH_P_ARP) { struct trllc *trllc; hdr_len = sizeof(struct trh_hdr) + sizeof(struct trllc); trh = (struct trh_hdr *)skb_push(skb, hdr_len); trllc = (struct trllc *)(trh+1); trllc->dsap = trllc->ssap = EXTENDED_SAP; trllc->llc = UI_CMD; trllc->protid[0] = trllc->protid[1] = trllc->protid[2] = 0x00; trllc->ethertype = htons(type); } else { hdr_len = sizeof(struct trh_hdr); trh = (struct trh_hdr *)skb_push(skb, hdr_len); } trh->ac=AC; trh->fc=LLC_FRAME; if(saddr) memcpy(trh->saddr,saddr,dev->addr_len); else memcpy(trh->saddr,dev->dev_addr,dev->addr_len); /* * Build the destination and then source route the frame */ if(daddr) { memcpy(trh->daddr,daddr,dev->addr_len); tr_source_route(skb, trh, dev); return hdr_len; } return -hdr_len; } /* * A neighbour discovery of some species (eg arp) has completed. We * can now send the packet. 
*/ static int tr_rebuild_header(struct sk_buff *skb) { struct trh_hdr *trh=(struct trh_hdr *)skb->data; struct trllc *trllc=(struct trllc *)(skb->data+sizeof(struct trh_hdr)); struct net_device *dev = skb->dev; /* * FIXME: We don't yet support IPv6 over token rings */ if(trllc->ethertype != htons(ETH_P_IP)) { printk("tr_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(trllc->ethertype)); return 0; } #ifdef CONFIG_INET if(arp_find(trh->daddr, skb)) { return 1; } else #endif { tr_source_route(skb,trh,dev); return 0; } } /* * Some of this is a bit hackish. We intercept RIF information * used for source routing. We also grab IP directly and don't feed * it via SNAP. */ __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev) { struct trh_hdr *trh; struct trllc *trllc; unsigned riflen=0; skb->dev = dev; skb_reset_mac_header(skb); trh = tr_hdr(skb); if(trh->saddr[0] & TR_RII) riflen = (ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8; trllc = (struct trllc *)(skb->data+sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen); skb_pull(skb,sizeof(struct trh_hdr)-TR_MAXRIFLEN+riflen); if(*trh->daddr & 0x80) { if(!memcmp(trh->daddr,dev->broadcast,TR_ALEN)) skb->pkt_type=PACKET_BROADCAST; else skb->pkt_type=PACKET_MULTICAST; } else if ( (trh->daddr[0] & 0x01) && (trh->daddr[1] & 0x00) && (trh->daddr[2] & 0x5E)) { skb->pkt_type=PACKET_MULTICAST; } else if(dev->flags & IFF_PROMISC) { if(memcmp(trh->daddr, dev->dev_addr, TR_ALEN)) skb->pkt_type=PACKET_OTHERHOST; } if ((skb->pkt_type != PACKET_BROADCAST) && (skb->pkt_type != PACKET_MULTICAST)) tr_add_rif_info(trh,dev) ; /* * Strip the SNAP header from ARP packets since we don't * pass them through to the 802.2/SNAP layers. */ if (trllc->dsap == EXTENDED_SAP && (trllc->ethertype == htons(ETH_P_IP) || trllc->ethertype == htons(ETH_P_IPV6) || trllc->ethertype == htons(ETH_P_ARP))) { skb_pull(skb, sizeof(struct trllc)); return trllc->ethertype; } return htons(ETH_P_TR_802_2); } /* * We try to do source routing... 
*/ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh, struct net_device *dev) { int slack; unsigned int hash; struct rif_cache *entry; unsigned char *olddata; unsigned long flags; static const unsigned char mcast_func_addr[] = {0xC0,0x00,0x00,0x04,0x00,0x00}; spin_lock_irqsave(&rif_lock, flags); /* * Broadcasts are single route as stated in RFC 1042 */ if( (!memcmp(&(trh->daddr[0]),&(dev->broadcast[0]),TR_ALEN)) || (!memcmp(&(trh->daddr[0]),&(mcast_func_addr[0]), TR_ALEN)) ) { trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK) | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST); trh->saddr[0]|=TR_RII; } else { hash = rif_hash(trh->daddr); /* * Walk the hash table and look for an entry */ for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->daddr[0]),TR_ALEN);entry=entry->next); /* * If we found an entry we can route the frame. */ if(entry) { #if TR_SR_DEBUG printk("source routing for %pM\n", trh->daddr); #endif if(!entry->local_ring && (ntohs(entry->rcf) & TR_RCF_LEN_MASK) >> 8) { trh->rcf=entry->rcf; memcpy(&trh->rseg[0],&entry->rseg[0],8*sizeof(unsigned short)); trh->rcf^=htons(TR_RCF_DIR_BIT); trh->rcf&=htons(0x1fff); /* Issam Chehab <ichehab@madge1.demon.co.uk> */ trh->saddr[0]|=TR_RII; #if TR_SR_DEBUG printk("entry found with rcf %04x\n", entry->rcf); } else { printk("entry found but without rcf length, local=%02x\n", entry->local_ring); #endif } entry->last_used=jiffies; } else { /* * Without the information we simply have to shout * on the wire. The replies should rapidly clean this * situation up. 
*/ trh->rcf=htons((((sizeof(trh->rcf)) << 8) & TR_RCF_LEN_MASK) | TR_RCF_FRAME2K | TR_RCF_LIMITED_BROADCAST); trh->saddr[0]|=TR_RII; #if TR_SR_DEBUG printk("no entry in rif table found - broadcasting frame\n"); #endif } } /* Compress the RIF here so we don't have to do it in the driver(s) */ if (!(trh->saddr[0] & 0x80)) slack = 18; else slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8); olddata = skb->data; spin_unlock_irqrestore(&rif_lock, flags); skb_pull(skb, slack); memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack); } /* * We have learned some new RIF information for our source * routing. */ static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) { unsigned int hash, rii_p = 0; unsigned long flags; struct rif_cache *entry; unsigned char saddr0; spin_lock_irqsave(&rif_lock, flags); saddr0 = trh->saddr[0]; /* * Firstly see if the entry exists */ if(trh->saddr[0] & TR_RII) { trh->saddr[0]&=0x7f; if (((ntohs(trh->rcf) & TR_RCF_LEN_MASK) >> 8) > 2) { rii_p = 1; } } hash = rif_hash(trh->saddr); for(entry=rif_table[hash];entry && memcmp(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN);entry=entry->next); if(entry==NULL) { #if TR_SR_DEBUG printk("adding rif_entry: addr:%pM rcf:%04X\n", trh->saddr, ntohs(trh->rcf)); #endif /* * Allocate our new entry. A failure to allocate loses * use the information. This is harmless. * * FIXME: We ought to keep some kind of cache size * limiting and adjust the timers to suit. 
*/ entry=kmalloc(sizeof(struct rif_cache),GFP_ATOMIC); if(!entry) { printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n"); spin_unlock_irqrestore(&rif_lock, flags); return; } memcpy(&(entry->addr[0]),&(trh->saddr[0]),TR_ALEN); entry->iface = dev->ifindex; entry->next=rif_table[hash]; entry->last_used=jiffies; rif_table[hash]=entry; if (rii_p) { entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); entry->local_ring = 0; } else { entry->local_ring = 1; } } else /* Y. Tahara added */ { /* * Update existing entries */ if (!entry->local_ring) if (entry->rcf != (trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK)) && !(trh->rcf & htons(TR_RCF_BROADCAST_MASK))) { #if TR_SR_DEBUG printk("updating rif_entry: addr:%pM rcf:%04X\n", trh->saddr, ntohs(trh->rcf)); #endif entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); } entry->last_used=jiffies; } trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */ spin_unlock_irqrestore(&rif_lock, flags); } /* * Scan the cache with a timer and see what we need to throw out. */ static void rif_check_expire(unsigned long dummy) { int i; unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2; spin_lock_irqsave(&rif_lock, flags); for(i =0; i < RIF_TABLE_SIZE; i++) { struct rif_cache *entry, **pentry; pentry = rif_table+i; while((entry=*pentry) != NULL) { unsigned long expires = entry->last_used + sysctl_tr_rif_timeout; if (time_before_eq(expires, jiffies)) { *pentry = entry->next; kfree(entry); } else { pentry = &entry->next; if (time_before(expires, next_interval)) next_interval = expires; } } } spin_unlock_irqrestore(&rif_lock, flags); mod_timer(&rif_timer, next_interval); } /* * Generate the /proc/net information for the token ring RIF * routing. 
*/ #ifdef CONFIG_PROC_FS static struct rif_cache *rif_get_idx(loff_t pos) { int i; struct rif_cache *entry; loff_t off = 0; for(i = 0; i < RIF_TABLE_SIZE; i++) for(entry = rif_table[i]; entry; entry = entry->next) { if (off == pos) return entry; ++off; } return NULL; } static void *rif_seq_start(struct seq_file *seq, loff_t *pos) __acquires(&rif_lock) { spin_lock_irq(&rif_lock); return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN; } static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos) { int i; struct rif_cache *ent = v; ++*pos; if (v == SEQ_START_TOKEN) { i = -1; goto scan; } if (ent->next) return ent->next; i = rif_hash(ent->addr); scan: while (++i < RIF_TABLE_SIZE) { if ((ent = rif_table[i]) != NULL) return ent; } return NULL; } static void rif_seq_stop(struct seq_file *seq, void *v) __releases(&rif_lock) { spin_unlock_irq(&rif_lock); } static int rif_seq_show(struct seq_file *seq, void *v) { int j, rcf_len, segment, brdgnmb; struct rif_cache *entry = v; if (v == SEQ_START_TOKEN) seq_puts(seq, "if TR address TTL rcf routing segments\n"); else { struct net_device *dev = dev_get_by_index(&init_net, entry->iface); long ttl = (long) (entry->last_used + sysctl_tr_rif_timeout) - (long) jiffies; seq_printf(seq, "%s %pM %7li ", dev?dev->name:"?", entry->addr, ttl/HZ); if (entry->local_ring) seq_puts(seq, "local\n"); else { seq_printf(seq, "%04X", ntohs(entry->rcf)); rcf_len = ((ntohs(entry->rcf) & TR_RCF_LEN_MASK)>>8)-2; if (rcf_len) rcf_len >>= 1; for(j = 1; j < rcf_len; j++) { if(j==1) { segment=ntohs(entry->rseg[j-1])>>4; seq_printf(seq," %03X",segment); } segment=ntohs(entry->rseg[j])>>4; brdgnmb=ntohs(entry->rseg[j-1])&0x00f; seq_printf(seq,"-%01X-%03X",brdgnmb,segment); } seq_putc(seq, '\n'); } if (dev) dev_put(dev); } return 0; } static const struct seq_operations rif_seq_ops = { .start = rif_seq_start, .next = rif_seq_next, .stop = rif_seq_stop, .show = rif_seq_show, }; static int rif_seq_open(struct inode *inode, struct file *file) { return 
seq_open(file, &rif_seq_ops); } static const struct file_operations rif_seq_fops = { .owner = THIS_MODULE, .open = rif_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif static const struct header_ops tr_header_ops = { .create = tr_header, .rebuild= tr_rebuild_header, }; static void tr_setup(struct net_device *dev) { /* * Configure and register */ dev->header_ops = &tr_header_ops; dev->type = ARPHRD_IEEE802_TR; dev->hard_header_len = TR_HLEN; dev->mtu = 2000; dev->addr_len = TR_ALEN; dev->tx_queue_len = 100; /* Long queues on tr */ memset(dev->broadcast,0xFF, TR_ALEN); /* New-style flags. */ dev->flags = IFF_BROADCAST | IFF_MULTICAST ; } /** * alloc_trdev - Register token ring device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this token ring device * * Fill in the fields of the device structure with token ring-generic values. * * Constructs a new net device, complete with a private data area of * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for * this private data area. */ struct net_device *alloc_trdev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "tr%d", tr_setup); } #ifdef CONFIG_SYSCTL static struct ctl_table tr_table[] = { { .procname = "rif_timeout", .data = &sysctl_tr_rif_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, { }, }; static __initdata struct ctl_path tr_path[] = { { .procname = "net", }, { .procname = "token-ring", }, { } }; #endif /* * Called during bootup. We don't actually have to initialise * too much for this. */ static int __init rif_init(void) { rif_timer.expires = jiffies + sysctl_tr_rif_timeout; setup_timer(&rif_timer, rif_check_expire, 0); add_timer(&rif_timer); #ifdef CONFIG_SYSCTL register_sysctl_paths(tr_path, tr_table); #endif proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops); return 0; } module_init(rif_init); EXPORT_SYMBOL(tr_type_trans); EXPORT_SYMBOL(alloc_trdev); MODULE_LICENSE("GPL");
gpl-2.0
intervigilium/android_kernel_htc_msm7x30
net/802/hippi.c
3346
6076
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * HIPPI-type device handling. * * Version: @(#)hippi.c 1.0.0 05/29/97 * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Mark Evans, <evansmp@uhura.aston.ac.uk> * Florian La Roche, <rzsfl@rz.uni-sb.de> * Alan Cox, <gw4pts@gw4pts.ampr.org> * Jes Sorensen, <Jes.Sorensen@cern.ch> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/hippidevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <net/arp.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/system.h> /* * Create the HIPPI MAC header for an arbitrary protocol layer * * saddr=NULL means use device source address * daddr=NULL means leave destination address (eg unresolved arp) */ static int hippi_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; if (!len){ len = skb->len - HIPPI_HLEN; printk("hippi_header(): length not supplied\n"); } /* * Due to the stupidity of the little endian byte-order we * have to set the fp field this way. 
*/ hip->fp.fixed = htonl(0x04800018); hip->fp.d2_size = htonl(len + 8); hip->le.fc = 0; hip->le.double_wide = 0; /* only HIPPI 800 for the time being */ hip->le.message_type = 0; /* Data PDU */ hip->le.dest_addr_type = 2; /* 12 bit SC address */ hip->le.src_addr_type = 2; /* 12 bit SC address */ memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3); memset(&hip->le.reserved, 0, 16); hip->snap.dsap = HIPPI_EXTENDED_SAP; hip->snap.ssap = HIPPI_EXTENDED_SAP; hip->snap.ctrl = HIPPI_UI_CMD; hip->snap.oui[0] = 0x00; hip->snap.oui[1] = 0x00; hip->snap.oui[2] = 0x00; hip->snap.ethertype = htons(type); if (daddr) { memcpy(hip->le.dest_switch_addr, daddr + 3, 3); memcpy(&hcb->ifield, daddr + 2, 4); return HIPPI_HLEN; } hcb->ifield = 0; return -((int)HIPPI_HLEN); } /* * Rebuild the HIPPI MAC header. This is called after an ARP has * completed on this sk_buff. We now let ARP fill in the other fields. */ static int hippi_rebuild_header(struct sk_buff *skb) { struct hippi_hdr *hip = (struct hippi_hdr *)skb->data; /* * Only IP is currently supported */ if(hip->snap.ethertype != htons(ETH_P_IP)) { printk(KERN_DEBUG "%s: unable to resolve type %X addresses.\n",skb->dev->name,ntohs(hip->snap.ethertype)); return 0; } /* * We don't support dynamic ARP on HIPPI, but we use the ARP * static ARP tables to hold the I-FIELDs. */ return arp_find(hip->le.daddr, skb); } /* * Determine the packet's protocol ID. */ __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev) { struct hippi_hdr *hip; /* * This is actually wrong ... question is if we really should * set the raw address here. */ skb->dev = dev; skb_reset_mac_header(skb); hip = (struct hippi_hdr *)skb_mac_header(skb); skb_pull(skb, HIPPI_HLEN); /* * No fancy promisc stuff here now. */ return hip->snap.ethertype; } EXPORT_SYMBOL(hippi_type_trans); int hippi_change_mtu(struct net_device *dev, int new_mtu) { /* * HIPPI's got these nice large MTUs. 
*/ if ((new_mtu < 68) || (new_mtu > 65280)) return -EINVAL; dev->mtu = new_mtu; return 0; } EXPORT_SYMBOL(hippi_change_mtu); /* * For HIPPI we will actually use the lower 4 bytes of the hardware * address as the I-FIELD rather than the actual hardware address. */ int hippi_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; if (netif_running(dev)) return -EBUSY; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); return 0; } EXPORT_SYMBOL(hippi_mac_addr); int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) { /* Never send broadcast/multicast ARP messages */ p->mcast_probes = 0; /* In IPv6 unicast probes are valid even on NBMA, * because they are encapsulated in normal IPv6 protocol. * Should be a generic flag. */ if (p->tbl->family != AF_INET6) p->ucast_probes = 0; return 0; } EXPORT_SYMBOL(hippi_neigh_setup_dev); static const struct header_ops hippi_header_ops = { .create = hippi_header, .rebuild = hippi_rebuild_header, }; static void hippi_setup(struct net_device *dev) { dev->header_ops = &hippi_header_ops; /* * We don't support HIPPI `ARP' for the time being, and probably * never will unless someone else implements it. However we * still need a fake ARPHRD to make ifconfig and friends play ball. */ dev->type = ARPHRD_HIPPI; dev->hard_header_len = HIPPI_HLEN; dev->mtu = 65280; dev->addr_len = HIPPI_ALEN; dev->tx_queue_len = 25 /* 5 */; memset(dev->broadcast, 0xFF, HIPPI_ALEN); /* * HIPPI doesn't support broadcast+multicast and we only use * static ARP tables. ARP is disabled by hippi_neigh_setup_dev. */ dev->flags = 0; } /** * alloc_hippi_dev - Register HIPPI device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this HIPPI device * * Fill in the fields of the device structure with HIPPI-generic values. * * Constructs a new net device, complete with a private data area of * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for * this private data area. 
*/ struct net_device *alloc_hippi_dev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "hip%d", hippi_setup); } EXPORT_SYMBOL(alloc_hippi_dev);
gpl-2.0
KFire-Android/kernel_omap_bowser-common
drivers/macintosh/windfarm_pm121.c
3346
25397
/* * Windfarm PowerMac thermal control. iMac G5 iSight * * (c) Copyright 2007 Étienne Bersac <bersace@gmail.com> * * Bits & pieces from windfarm_pm81.c by (c) Copyright 2005 Benjamin * Herrenschmidt, IBM Corp. <benh@kernel.crashing.org> * * Released under the term of the GNU GPL v2. * * * * PowerMac12,1 * ============ * * * The algorithm used is the PID control algorithm, used the same way * the published Darwin code does, using the same values that are * present in the Darwin 8.10 snapshot property lists (note however * that none of the code has been re-used, it's a complete * re-implementation * * There is two models using PowerMac12,1. Model 2 is iMac G5 iSight * 17" while Model 3 is iMac G5 20". They do have both the same * controls with a tiny difference. The control-ids of hard-drive-fan * and cpu-fan is swapped. * * * Target Correction : * * controls have a target correction calculated as : * * new_min = ((((average_power * slope) >> 16) + offset) >> 16) + min_value * new_value = max(new_value, max(new_min, 0)) * * OD Fan control correction. * * # model_id: 2 * offset : -19563152 * slope : 1956315 * * # model_id: 3 * offset : -15650652 * slope : 1565065 * * HD Fan control correction. * * # model_id: 2 * offset : -15650652 * slope : 1565065 * * # model_id: 3 * offset : -19563152 * slope : 1956315 * * CPU Fan control correction. * * # model_id: 2 * offset : -25431900 * slope : 2543190 * * # model_id: 3 * offset : -15650652 * slope : 1565065 * * * Target rubber-banding : * * Some controls have a target correction which depends on another * control value. The correction is computed in the following way : * * new_min = ref_value * slope + offset * * ref_value is the value of the reference control. 
If new_min is * greater than 0, then we correct the target value using : * * new_target = max (new_target, new_min >> 16) * * * # model_id : 2 * control : cpu-fan * ref : optical-drive-fan * offset : -15650652 * slope : 1565065 * * # model_id : 3 * control : optical-drive-fan * ref : hard-drive-fan * offset : -32768000 * slope : 65536 * * * In order to have the moste efficient correction with those * dependencies, we must trigger HD loop before OD loop before CPU * loop. * * * The various control loops found in Darwin config file are: * * HD Fan control loop. * * # model_id: 2 * control : hard-drive-fan * sensor : hard-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x002D70A3 * G_r = 0x00019999 * History = 2 entries * Input target = 0x370000 * Interval = 5s * * # model_id: 3 * control : hard-drive-fan * sensor : hard-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x002170A3 * G_r = 0x00019999 * History = 2 entries * Input target = 0x370000 * Interval = 5s * * OD Fan control loop. * * # model_id: 2 * control : optical-drive-fan * sensor : optical-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x001FAE14 * G_r = 0x00019999 * History = 2 entries * Input target = 0x320000 * Interval = 5s * * # model_id: 3 * control : optical-drive-fan * sensor : optical-drive-temp * PID params : G_d = 0x00000000 * G_p = 0x001FAE14 * G_r = 0x00019999 * History = 2 entries * Input target = 0x320000 * Interval = 5s * * GPU Fan control loop. * * # model_id: 2 * control : hard-drive-fan * sensor : gpu-temp * PID params : G_d = 0x00000000 * G_p = 0x002A6666 * G_r = 0x00019999 * History = 2 entries * Input target = 0x5A0000 * Interval = 5s * * # model_id: 3 * control : cpu-fan * sensor : gpu-temp * PID params : G_d = 0x00000000 * G_p = 0x0010CCCC * G_r = 0x00019999 * History = 2 entries * Input target = 0x500000 * Interval = 5s * * KODIAK (aka northbridge) Fan control loop. 
* * # model_id: 2 * control : optical-drive-fan * sensor : north-bridge-temp * PID params : G_d = 0x00000000 * G_p = 0x003BD70A * G_r = 0x00019999 * History = 2 entries * Input target = 0x550000 * Interval = 5s * * # model_id: 3 * control : hard-drive-fan * sensor : north-bridge-temp * PID params : G_d = 0x00000000 * G_p = 0x0030F5C2 * G_r = 0x00019999 * History = 2 entries * Input target = 0x550000 * Interval = 5s * * CPU Fan control loop. * * control : cpu-fan * sensors : cpu-temp, cpu-power * PID params : from SDB partition * * * CPU Slew control loop. * * control : cpufreq-clamp * sensor : cpu-temp * */ #undef DEBUG #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/kmod.h> #include <linux/device.h> #include <linux/platform_device.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sections.h> #include <asm/smu.h> #include "windfarm.h" #include "windfarm_pid.h" #define VERSION "0.3" static int pm121_mach_model; /* machine model id */ /* Controls & sensors */ static struct wf_sensor *sensor_cpu_power; static struct wf_sensor *sensor_cpu_temp; static struct wf_sensor *sensor_cpu_voltage; static struct wf_sensor *sensor_cpu_current; static struct wf_sensor *sensor_gpu_temp; static struct wf_sensor *sensor_north_bridge_temp; static struct wf_sensor *sensor_hard_drive_temp; static struct wf_sensor *sensor_optical_drive_temp; static struct wf_sensor *sensor_incoming_air_temp; /* unused ! */ enum { FAN_CPU, FAN_HD, FAN_OD, CPUFREQ, N_CONTROLS }; static struct wf_control *controls[N_CONTROLS] = {}; /* Set to kick the control loop into life */ static int pm121_all_controls_ok, pm121_all_sensors_ok, pm121_started; enum { FAILURE_FAN = 1 << 0, FAILURE_SENSOR = 1 << 1, FAILURE_OVERTEMP = 1 << 2 }; /* All sys loops. 
Note the HD before the OD loop in order to have it run before. */ enum { LOOP_GPU, /* control = hd or cpu, but luckily, it doesn't matter */ LOOP_HD, /* control = hd */ LOOP_KODIAK, /* control = hd or od */ LOOP_OD, /* control = od */ N_LOOPS }; static const char *loop_names[N_LOOPS] = { "GPU", "HD", "KODIAK", "OD", }; #define PM121_NUM_CONFIGS 2 static unsigned int pm121_failure_state; static int pm121_readjust, pm121_skipping; static s32 average_power; struct pm121_correction { int offset; int slope; }; static struct pm121_correction corrections[N_CONTROLS][PM121_NUM_CONFIGS] = { /* FAN_OD */ { /* MODEL 2 */ { .offset = -19563152, .slope = 1956315 }, /* MODEL 3 */ { .offset = -15650652, .slope = 1565065 }, }, /* FAN_HD */ { /* MODEL 2 */ { .offset = -15650652, .slope = 1565065 }, /* MODEL 3 */ { .offset = -19563152, .slope = 1956315 }, }, /* FAN_CPU */ { /* MODEL 2 */ { .offset = -25431900, .slope = 2543190 }, /* MODEL 3 */ { .offset = -15650652, .slope = 1565065 }, }, /* CPUFREQ has no correction (and is not implemented at all) */ }; struct pm121_connection { unsigned int control_id; unsigned int ref_id; struct pm121_correction correction; }; static struct pm121_connection pm121_connections[] = { /* MODEL 2 */ { .control_id = FAN_CPU, .ref_id = FAN_OD, { .offset = -32768000, .slope = 65536 } }, /* MODEL 3 */ { .control_id = FAN_OD, .ref_id = FAN_HD, { .offset = -32768000, .slope = 65536 } }, }; /* pointer to the current model connection */ static struct pm121_connection *pm121_connection; /* * ****** System Fans Control Loop ****** * */ /* Since each loop handles only one control and we want to avoid * writing virtual control, we store the control correction with the * loop params. Some data are not set, there are common to all loop * and thus, hardcoded. */ struct pm121_sys_param { /* purely informative since we use mach_model-2 as index */ int model_id; struct wf_sensor **sensor; /* use sensor_id instead ? 
*/ s32 gp, itarget; unsigned int control_id; }; static struct pm121_sys_param pm121_sys_all_params[N_LOOPS][PM121_NUM_CONFIGS] = { /* GPU Fan control loop */ { { .model_id = 2, .sensor = &sensor_gpu_temp, .gp = 0x002A6666, .itarget = 0x5A0000, .control_id = FAN_HD, }, { .model_id = 3, .sensor = &sensor_gpu_temp, .gp = 0x0010CCCC, .itarget = 0x500000, .control_id = FAN_CPU, }, }, /* HD Fan control loop */ { { .model_id = 2, .sensor = &sensor_hard_drive_temp, .gp = 0x002D70A3, .itarget = 0x370000, .control_id = FAN_HD, }, { .model_id = 3, .sensor = &sensor_hard_drive_temp, .gp = 0x002170A3, .itarget = 0x370000, .control_id = FAN_HD, }, }, /* KODIAK Fan control loop */ { { .model_id = 2, .sensor = &sensor_north_bridge_temp, .gp = 0x003BD70A, .itarget = 0x550000, .control_id = FAN_OD, }, { .model_id = 3, .sensor = &sensor_north_bridge_temp, .gp = 0x0030F5C2, .itarget = 0x550000, .control_id = FAN_HD, }, }, /* OD Fan control loop */ { { .model_id = 2, .sensor = &sensor_optical_drive_temp, .gp = 0x001FAE14, .itarget = 0x320000, .control_id = FAN_OD, }, { .model_id = 3, .sensor = &sensor_optical_drive_temp, .gp = 0x001FAE14, .itarget = 0x320000, .control_id = FAN_OD, }, }, }; /* the hardcoded values */ #define PM121_SYS_GD 0x00000000 #define PM121_SYS_GR 0x00019999 #define PM121_SYS_HISTORY_SIZE 2 #define PM121_SYS_INTERVAL 5 /* State data used by the system fans control loop */ struct pm121_sys_state { int ticks; s32 setpoint; struct wf_pid_state pid; }; struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {}; /* * ****** CPU Fans Control Loop ****** * */ #define PM121_CPU_INTERVAL 1 /* State data used by the cpu fans control loop */ struct pm121_cpu_state { int ticks; s32 setpoint; struct wf_cpu_pid_state pid; }; static struct pm121_cpu_state *pm121_cpu_state; /* * ***** Implementation ***** * */ /* correction the value using the output-low-bound correction algo */ static s32 pm121_correct(s32 new_setpoint, unsigned int control_id, s32 min) { s32 new_min; struct 
pm121_correction *correction; correction = &corrections[control_id][pm121_mach_model - 2]; new_min = (average_power * correction->slope) >> 16; new_min += correction->offset; new_min = (new_min >> 16) + min; return max3(new_setpoint, new_min, 0); } static s32 pm121_connect(unsigned int control_id, s32 setpoint) { s32 new_min, value, new_setpoint; if (pm121_connection->control_id == control_id) { controls[control_id]->ops->get_value(controls[control_id], &value); new_min = value * pm121_connection->correction.slope; new_min += pm121_connection->correction.offset; if (new_min > 0) { new_setpoint = max(setpoint, (new_min >> 16)); if (new_setpoint != setpoint) { pr_debug("pm121: %s depending on %s, " "corrected from %d to %d RPM\n", controls[control_id]->name, controls[pm121_connection->ref_id]->name, (int) setpoint, (int) new_setpoint); } } else new_setpoint = setpoint; } /* no connection */ else new_setpoint = setpoint; return new_setpoint; } /* FAN LOOPS */ static void pm121_create_sys_fans(int loop_id) { struct pm121_sys_param *param = NULL; struct wf_pid_param pid_param; struct wf_control *control = NULL; int i; /* First, locate the params for this model */ for (i = 0; i < PM121_NUM_CONFIGS; i++) { if (pm121_sys_all_params[loop_id][i].model_id == pm121_mach_model) { param = &(pm121_sys_all_params[loop_id][i]); break; } } /* No params found, put fans to max */ if (param == NULL) { printk(KERN_WARNING "pm121: %s fan config not found " " for this machine model\n", loop_names[loop_id]); goto fail; } control = controls[param->control_id]; /* Alloc & initialize state */ pm121_sys_state[loop_id] = kmalloc(sizeof(struct pm121_sys_state), GFP_KERNEL); if (pm121_sys_state[loop_id] == NULL) { printk(KERN_WARNING "pm121: Memory allocation error\n"); goto fail; } pm121_sys_state[loop_id]->ticks = 1; /* Fill PID params */ pid_param.gd = PM121_SYS_GD; pid_param.gp = param->gp; pid_param.gr = PM121_SYS_GR; pid_param.interval = PM121_SYS_INTERVAL; pid_param.history_len = 
PM121_SYS_HISTORY_SIZE; pid_param.itarget = param->itarget; pid_param.min = control->ops->get_min(control); pid_param.max = control->ops->get_max(control); wf_pid_init(&pm121_sys_state[loop_id]->pid, &pid_param); pr_debug("pm121: %s Fan control loop initialized.\n" " itarged=%d.%03d, min=%d RPM, max=%d RPM\n", loop_names[loop_id], FIX32TOPRINT(pid_param.itarget), pid_param.min, pid_param.max); return; fail: /* note that this is not optimal since another loop may still control the same control */ printk(KERN_WARNING "pm121: failed to set up %s loop " "setting \"%s\" to max speed.\n", loop_names[loop_id], control->name); if (control) wf_control_set_max(control); } static void pm121_sys_fans_tick(int loop_id) { struct pm121_sys_param *param; struct pm121_sys_state *st; struct wf_sensor *sensor; struct wf_control *control; s32 temp, new_setpoint; int rc; param = &(pm121_sys_all_params[loop_id][pm121_mach_model-2]); st = pm121_sys_state[loop_id]; sensor = *(param->sensor); control = controls[param->control_id]; if (--st->ticks != 0) { if (pm121_readjust) goto readjust; return; } st->ticks = PM121_SYS_INTERVAL; rc = sensor->ops->get_value(sensor, &temp); if (rc) { printk(KERN_WARNING "windfarm: %s sensor error %d\n", sensor->name, rc); pm121_failure_state |= FAILURE_SENSOR; return; } pr_debug("pm121: %s Fan tick ! 
%s: %d.%03d\n", loop_names[loop_id], sensor->name, FIX32TOPRINT(temp)); new_setpoint = wf_pid_run(&st->pid, temp); /* correction */ new_setpoint = pm121_correct(new_setpoint, param->control_id, st->pid.param.min); /* linked corretion */ new_setpoint = pm121_connect(param->control_id, new_setpoint); if (new_setpoint == st->setpoint) return; st->setpoint = new_setpoint; pr_debug("pm121: %s corrected setpoint: %d RPM\n", control->name, (int)new_setpoint); readjust: if (control && pm121_failure_state == 0) { rc = control->ops->set_value(control, st->setpoint); if (rc) { printk(KERN_WARNING "windfarm: %s fan error %d\n", control->name, rc); pm121_failure_state |= FAILURE_FAN; } } } /* CPU LOOP */ static void pm121_create_cpu_fans(void) { struct wf_cpu_pid_param pid_param; const struct smu_sdbp_header *hdr; struct smu_sdbp_cpupiddata *piddata; struct smu_sdbp_fvt *fvt; struct wf_control *fan_cpu; s32 tmax, tdelta, maxpow, powadj; fan_cpu = controls[FAN_CPU]; /* First, locate the PID params in SMU SBD */ hdr = smu_get_sdb_partition(SMU_SDB_CPUPIDDATA_ID, NULL); if (hdr == 0) { printk(KERN_WARNING "pm121: CPU PID fan config not found.\n"); goto fail; } piddata = (struct smu_sdbp_cpupiddata *)&hdr[1]; /* Get the FVT params for operating point 0 (the only supported one * for now) in order to get tmax */ hdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); if (hdr) { fvt = (struct smu_sdbp_fvt *)&hdr[1]; tmax = ((s32)fvt->maxtemp) << 16; } else tmax = 0x5e0000; /* 94 degree default */ /* Alloc & initialize state */ pm121_cpu_state = kmalloc(sizeof(struct pm121_cpu_state), GFP_KERNEL); if (pm121_cpu_state == NULL) goto fail; pm121_cpu_state->ticks = 1; /* Fill PID params */ pid_param.interval = PM121_CPU_INTERVAL; pid_param.history_len = piddata->history_len; if (pid_param.history_len > WF_CPU_PID_MAX_HISTORY) { printk(KERN_WARNING "pm121: History size overflow on " "CPU control loop (%d)\n", piddata->history_len); pid_param.history_len = WF_CPU_PID_MAX_HISTORY; } pid_param.gd = 
piddata->gd; pid_param.gp = piddata->gp; pid_param.gr = piddata->gr / pid_param.history_len; tdelta = ((s32)piddata->target_temp_delta) << 16; maxpow = ((s32)piddata->max_power) << 16; powadj = ((s32)piddata->power_adj) << 16; pid_param.tmax = tmax; pid_param.ttarget = tmax - tdelta; pid_param.pmaxadj = maxpow - powadj; pid_param.min = fan_cpu->ops->get_min(fan_cpu); pid_param.max = fan_cpu->ops->get_max(fan_cpu); wf_cpu_pid_init(&pm121_cpu_state->pid, &pid_param); pr_debug("pm121: CPU Fan control initialized.\n"); pr_debug(" ttarged=%d.%03d, tmax=%d.%03d, min=%d RPM, max=%d RPM,\n", FIX32TOPRINT(pid_param.ttarget), FIX32TOPRINT(pid_param.tmax), pid_param.min, pid_param.max); return; fail: printk(KERN_WARNING "pm121: CPU fan config not found, max fan speed\n"); if (controls[CPUFREQ]) wf_control_set_max(controls[CPUFREQ]); if (fan_cpu) wf_control_set_max(fan_cpu); } static void pm121_cpu_fans_tick(struct pm121_cpu_state *st) { s32 new_setpoint, temp, power; struct wf_control *fan_cpu = NULL; int rc; if (--st->ticks != 0) { if (pm121_readjust) goto readjust; return; } st->ticks = PM121_CPU_INTERVAL; fan_cpu = controls[FAN_CPU]; rc = sensor_cpu_temp->ops->get_value(sensor_cpu_temp, &temp); if (rc) { printk(KERN_WARNING "pm121: CPU temp sensor error %d\n", rc); pm121_failure_state |= FAILURE_SENSOR; return; } rc = sensor_cpu_power->ops->get_value(sensor_cpu_power, &power); if (rc) { printk(KERN_WARNING "pm121: CPU power sensor error %d\n", rc); pm121_failure_state |= FAILURE_SENSOR; return; } pr_debug("pm121: CPU Fans tick ! 
CPU temp: %d.%03d°C, power: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(power)); if (temp > st->pid.param.tmax) pm121_failure_state |= FAILURE_OVERTEMP; new_setpoint = wf_cpu_pid_run(&st->pid, power, temp); /* correction */ new_setpoint = pm121_correct(new_setpoint, FAN_CPU, st->pid.param.min); /* connected correction */ new_setpoint = pm121_connect(FAN_CPU, new_setpoint); if (st->setpoint == new_setpoint) return; st->setpoint = new_setpoint; pr_debug("pm121: CPU corrected setpoint: %d RPM\n", (int)new_setpoint); readjust: if (fan_cpu && pm121_failure_state == 0) { rc = fan_cpu->ops->set_value(fan_cpu, st->setpoint); if (rc) { printk(KERN_WARNING "pm121: %s fan error %d\n", fan_cpu->name, rc); pm121_failure_state |= FAILURE_FAN; } } } /* * ****** Common ****** * */ static void pm121_tick(void) { unsigned int last_failure = pm121_failure_state; unsigned int new_failure; s32 total_power; int i; if (!pm121_started) { pr_debug("pm121: creating control loops !\n"); for (i = 0; i < N_LOOPS; i++) pm121_create_sys_fans(i); pm121_create_cpu_fans(); pm121_started = 1; } /* skipping ticks */ if (pm121_skipping && --pm121_skipping) return; /* compute average power */ total_power = 0; for (i = 0; i < pm121_cpu_state->pid.param.history_len; i++) total_power += pm121_cpu_state->pid.powers[i]; average_power = total_power / pm121_cpu_state->pid.param.history_len; pm121_failure_state = 0; for (i = 0 ; i < N_LOOPS; i++) { if (pm121_sys_state[i]) pm121_sys_fans_tick(i); } if (pm121_cpu_state) pm121_cpu_fans_tick(pm121_cpu_state); pm121_readjust = 0; new_failure = pm121_failure_state & ~last_failure; /* If entering failure mode, clamp cpufreq and ramp all * fans to full speed. 
*/ if (pm121_failure_state && !last_failure) { for (i = 0; i < N_CONTROLS; i++) { if (controls[i]) wf_control_set_max(controls[i]); } } /* If leaving failure mode, unclamp cpufreq and readjust * all fans on next iteration */ if (!pm121_failure_state && last_failure) { if (controls[CPUFREQ]) wf_control_set_min(controls[CPUFREQ]); pm121_readjust = 1; } /* Overtemp condition detected, notify and start skipping a couple * ticks to let the temperature go down */ if (new_failure & FAILURE_OVERTEMP) { wf_set_overtemp(); pm121_skipping = 2; } /* We only clear the overtemp condition if overtemp is cleared * _and_ no other failure is present. Since a sensor error will * clear the overtemp condition (can't measure temperature) at * the control loop levels, but we don't want to keep it clear * here in this case */ if (new_failure == 0 && last_failure & FAILURE_OVERTEMP) wf_clear_overtemp(); } static struct wf_control* pm121_register_control(struct wf_control *ct, const char *match, unsigned int id) { if (controls[id] == NULL && !strcmp(ct->name, match)) { if (wf_get_control(ct) == 0) controls[id] = ct; } return controls[id]; } static void pm121_new_control(struct wf_control *ct) { int all = 1; if (pm121_all_controls_ok) return; all = pm121_register_control(ct, "optical-drive-fan", FAN_OD) && all; all = pm121_register_control(ct, "hard-drive-fan", FAN_HD) && all; all = pm121_register_control(ct, "cpu-fan", FAN_CPU) && all; all = pm121_register_control(ct, "cpufreq-clamp", CPUFREQ) && all; if (all) pm121_all_controls_ok = 1; } static struct wf_sensor* pm121_register_sensor(struct wf_sensor *sensor, const char *match, struct wf_sensor **var) { if (*var == NULL && !strcmp(sensor->name, match)) { if (wf_get_sensor(sensor) == 0) *var = sensor; } return *var; } static void pm121_new_sensor(struct wf_sensor *sr) { int all = 1; if (pm121_all_sensors_ok) return; all = pm121_register_sensor(sr, "cpu-temp", &sensor_cpu_temp) && all; all = pm121_register_sensor(sr, "cpu-current", 
&sensor_cpu_current) && all; all = pm121_register_sensor(sr, "cpu-voltage", &sensor_cpu_voltage) && all; all = pm121_register_sensor(sr, "cpu-power", &sensor_cpu_power) && all; all = pm121_register_sensor(sr, "hard-drive-temp", &sensor_hard_drive_temp) && all; all = pm121_register_sensor(sr, "optical-drive-temp", &sensor_optical_drive_temp) && all; all = pm121_register_sensor(sr, "incoming-air-temp", &sensor_incoming_air_temp) && all; all = pm121_register_sensor(sr, "north-bridge-temp", &sensor_north_bridge_temp) && all; all = pm121_register_sensor(sr, "gpu-temp", &sensor_gpu_temp) && all; if (all) pm121_all_sensors_ok = 1; } static int pm121_notify(struct notifier_block *self, unsigned long event, void *data) { switch (event) { case WF_EVENT_NEW_CONTROL: pr_debug("pm121: new control %s detected\n", ((struct wf_control *)data)->name); pm121_new_control(data); break; case WF_EVENT_NEW_SENSOR: pr_debug("pm121: new sensor %s detected\n", ((struct wf_sensor *)data)->name); pm121_new_sensor(data); break; case WF_EVENT_TICK: if (pm121_all_controls_ok && pm121_all_sensors_ok) pm121_tick(); break; } return 0; } static struct notifier_block pm121_events = { .notifier_call = pm121_notify, }; static int pm121_init_pm(void) { const struct smu_sdbp_header *hdr; hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL); if (hdr != 0) { struct smu_sdbp_sensortree *st = (struct smu_sdbp_sensortree *)&hdr[1]; pm121_mach_model = st->model_id; } pm121_connection = &pm121_connections[pm121_mach_model - 2]; printk(KERN_INFO "pm121: Initializing for iMac G5 iSight model ID %d\n", pm121_mach_model); return 0; } static int pm121_probe(struct platform_device *ddev) { wf_register_client(&pm121_events); return 0; } static int __devexit pm121_remove(struct platform_device *ddev) { wf_unregister_client(&pm121_events); return 0; } static struct platform_driver pm121_driver = { .probe = pm121_probe, .remove = __devexit_p(pm121_remove), .driver = { .name = "windfarm", .bus = &platform_bus_type, }, 
}; static int __init pm121_init(void) { int rc = -ENODEV; if (of_machine_is_compatible("PowerMac12,1")) rc = pm121_init_pm(); if (rc == 0) { request_module("windfarm_smu_controls"); request_module("windfarm_smu_sensors"); request_module("windfarm_smu_sat"); request_module("windfarm_lm75_sensor"); request_module("windfarm_max6690_sensor"); request_module("windfarm_cpufreq_clamp"); platform_driver_register(&pm121_driver); } return rc; } static void __exit pm121_exit(void) { platform_driver_unregister(&pm121_driver); } module_init(pm121_init); module_exit(pm121_exit); MODULE_AUTHOR("Étienne Bersac <bersace@gmail.com>"); MODULE_DESCRIPTION("Thermal control logic for iMac G5 (iSight)"); MODULE_LICENSE("GPL");
gpl-2.0
shakalaca/ASUS_ZenFone_A500KL
kernel/arch/um/drivers/stdio_console.c
4626
4931
/* * Copyright (C) 2000, 2001 Jeff Dike (jdike@karaya.com) * Licensed under the GPL */ #include "linux/posix_types.h" #include "linux/tty.h" #include "linux/tty_flip.h" #include "linux/types.h" #include "linux/major.h" #include "linux/kdev_t.h" #include "linux/console.h" #include "linux/string.h" #include "linux/sched.h" #include "linux/list.h" #include "linux/init.h" #include "linux/interrupt.h" #include "linux/slab.h" #include "linux/hardirq.h" #include "asm/current.h" #include "asm/irq.h" #include "stdio_console.h" #include "chan.h" #include "irq_user.h" #include "mconsole_kern.h" #include "init.h" #define MAX_TTYS (16) static void stdio_announce(char *dev_name, int dev) { printk(KERN_INFO "Virtual console %d assigned device '%s'\n", dev, dev_name); } /* Almost const, except that xterm_title may be changed in an initcall */ static struct chan_opts opts = { .announce = stdio_announce, .xterm_title = "Virtual Console #%d", .raw = 1, }; static int con_config(char *str, char **error_out); static int con_get_config(char *dev, char *str, int size, char **error_out); static int con_remove(int n, char **con_remove); /* Const, except for .mc.list */ static struct line_driver driver = { .name = "UML console", .device_name = "tty", .major = TTY_MAJOR, .minor_start = 0, .type = TTY_DRIVER_TYPE_CONSOLE, .subtype = SYSTEM_TYPE_CONSOLE, .read_irq = CONSOLE_IRQ, .read_irq_name = "console", .write_irq = CONSOLE_WRITE_IRQ, .write_irq_name = "console-write", .mc = { .list = LIST_HEAD_INIT(driver.mc.list), .name = "con", .config = con_config, .get_config = con_get_config, .id = line_id, .remove = con_remove, }, }; /* The array is initialized by line_init, at initcall time. The * elements are locked individually as needed. 
*/ static char *vt_conf[MAX_TTYS]; static char *def_conf; static struct line vts[MAX_TTYS]; static int con_config(char *str, char **error_out) { return line_config(vts, ARRAY_SIZE(vts), str, &opts, error_out); } static int con_get_config(char *dev, char *str, int size, char **error_out) { return line_get_config(dev, vts, ARRAY_SIZE(vts), str, size, error_out); } static int con_remove(int n, char **error_out) { return line_remove(vts, ARRAY_SIZE(vts), n, error_out); } static int con_open(struct tty_struct *tty, struct file *filp) { int err = line_open(vts, tty); if (err) printk(KERN_ERR "Failed to open console %d, err = %d\n", tty->index, err); return err; } /* Set in an initcall, checked in an exitcall */ static int con_init_done = 0; static const struct tty_operations console_ops = { .open = con_open, .close = line_close, .write = line_write, .put_char = line_put_char, .write_room = line_write_room, .chars_in_buffer = line_chars_in_buffer, .flush_buffer = line_flush_buffer, .flush_chars = line_flush_chars, .set_termios = line_set_termios, .ioctl = line_ioctl, .throttle = line_throttle, .unthrottle = line_unthrottle, }; static void uml_console_write(struct console *console, const char *string, unsigned len) { struct line *line = &vts[console->index]; unsigned long flags; spin_lock_irqsave(&line->lock, flags); console_write_chan(line->chan_out, string, len); spin_unlock_irqrestore(&line->lock, flags); } static struct tty_driver *uml_console_device(struct console *c, int *index) { *index = c->index; return driver.driver; } static int uml_console_setup(struct console *co, char *options) { struct line *line = &vts[co->index]; return console_open_chan(line, co); } /* No locking for register_console call - relies on single-threaded initcalls */ static struct console stdiocons = { .name = "tty", .write = uml_console_write, .device = uml_console_device, .setup = uml_console_setup, .flags = CON_PRINTBUFFER|CON_ANYTIME, .index = -1, }; static int stdio_init(void) { char 
*new_title; int err; int i; err = register_lines(&driver, &console_ops, vts, ARRAY_SIZE(vts)); if (err) return err; printk(KERN_INFO "Initialized stdio console driver\n"); new_title = add_xterm_umid(opts.xterm_title); if(new_title != NULL) opts.xterm_title = new_title; for (i = 0; i < MAX_TTYS; i++) { char *error; char *s = vt_conf[i]; if (!s) s = def_conf; if (!s) s = i ? CONFIG_CON_CHAN : CONFIG_CON_ZERO_CHAN; if (setup_one_line(vts, i, s, &opts, &error)) printk(KERN_ERR "setup_one_line failed for " "device %d : %s\n", i, error); } con_init_done = 1; register_console(&stdiocons); return 0; } late_initcall(stdio_init); static void console_exit(void) { if (!con_init_done) return; close_lines(vts, ARRAY_SIZE(vts)); } __uml_exitcall(console_exit); static int console_chan_setup(char *str) { line_setup(vt_conf, MAX_TTYS, &def_conf, str, "console"); return 1; } __setup("con", console_chan_setup); __channel_help(console_chan_setup, "con");
gpl-2.0
MasterChief87/android_kernel_zte_draconis
drivers/staging/usbip/userspace/src/usbip.c
7954
4057
/* * command structure borrowed from udev * (git://git.kernel.org/pub/scm/linux/hotplug/udev.git) * * Copyright (C) 2011 matt mooney <mfm@muteddisk.com> * 2005-2007 Takahiro Hirofuchi * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #include <stdlib.h> #include <getopt.h> #include <syslog.h> #include "usbip_common.h" #include "usbip.h" static int usbip_help(int argc, char *argv[]); static int usbip_version(int argc, char *argv[]); static const char usbip_version_string[] = PACKAGE_STRING; static const char usbip_usage_string[] = "usbip [--debug] [--log] [version]\n" " [help] <command> <args>\n"; static void usbip_usage(void) { printf("usage: %s", usbip_usage_string); } struct command { const char *name; int (*fn)(int argc, char *argv[]); const char *help; void (*usage)(void); }; static const struct command cmds[] = { { .name = "help", .fn = usbip_help, .help = NULL, .usage = NULL }, { .name = "version", .fn = usbip_version, .help = NULL, .usage = NULL }, { .name = "attach", .fn = usbip_attach, .help = "Attach a remote USB device", .usage = usbip_attach_usage }, { .name = "detach", .fn = usbip_detach, .help = "Detach a remote USB device", .usage = usbip_detach_usage }, { .name = "list", .fn = usbip_list, .help = "List exportable or local USB devices", .usage = usbip_list_usage }, { .name = "bind", .fn = usbip_bind, .help = "Bind device to " USBIP_HOST_DRV_NAME ".ko", 
.usage = usbip_bind_usage }, { .name = "unbind", .fn = usbip_unbind, .help = "Unbind device from " USBIP_HOST_DRV_NAME ".ko", .usage = usbip_unbind_usage }, { NULL, NULL, NULL, NULL } }; static int usbip_help(int argc, char *argv[]) { const struct command *cmd; int i; int ret = 0; if (argc > 1 && argv++) { for (i = 0; cmds[i].name != NULL; i++) if (!strcmp(cmds[i].name, argv[0]) && cmds[i].usage) { cmds[i].usage(); goto done; } ret = -1; } usbip_usage(); printf("\n"); for (cmd = cmds; cmd->name != NULL; cmd++) if (cmd->help != NULL) printf(" %-10s %s\n", cmd->name, cmd->help); printf("\n"); done: return ret; } static int usbip_version(int argc, char *argv[]) { (void) argc; (void) argv; printf(PROGNAME " (%s)\n", usbip_version_string); return 0; } static int run_command(const struct command *cmd, int argc, char *argv[]) { dbg("running command: `%s'", cmd->name); return cmd->fn(argc, argv); } int main(int argc, char *argv[]) { static const struct option opts[] = { { "debug", no_argument, NULL, 'd' }, { "log", no_argument, NULL, 'l' }, { NULL, 0, NULL, 0 } }; char *cmd; int opt; int i, rc = -1; usbip_use_stderr = 1; opterr = 0; for (;;) { opt = getopt_long(argc, argv, "+d", opts, NULL); if (opt == -1) break; switch (opt) { case 'd': usbip_use_debug = 1; break; case 'l': usbip_use_syslog = 1; openlog("", LOG_PID, LOG_USER); break; case '?': printf("usbip: invalid option\n"); default: usbip_usage(); goto out; } } cmd = argv[optind]; if (cmd) { for (i = 0; cmds[i].name != NULL; i++) if (!strcmp(cmds[i].name, cmd)) { argc -= optind; argv += optind; optind = 0; rc = run_command(&cmds[i], argc, argv); goto out; } } /* invalid command */ usbip_help(0, NULL); out: return (rc > -1 ? EXIT_SUCCESS : EXIT_FAILURE); }
gpl-2.0
andreturket/android_kernel_d1_p1
arch/mips/mti-malta/malta-amon.c
9234
2176
/* * Copyright (C) 2007 MIPS Technologies, Inc. * All rights reserved. * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * Arbitrary Monitor interface */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/smp.h> #include <asm/addrspace.h> #include <asm/mips-boards/launch.h> #include <asm/mipsmtregs.h> int amon_cpu_avail(int cpu) { struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); if (cpu < 0 || cpu >= NCPULAUNCH) { pr_debug("avail: cpu%d is out of range\n", cpu); return 0; } launch += cpu; if (!(launch->flags & LAUNCH_FREADY)) { pr_debug("avail: cpu%d is not ready\n", cpu); return 0; } if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) { pr_debug("avail: too late.. 
cpu%d is already gone\n", cpu); return 0; } return 1; } void amon_cpu_start(int cpu, unsigned long pc, unsigned long sp, unsigned long gp, unsigned long a0) { volatile struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); if (!amon_cpu_avail(cpu)) return; if (cpu == smp_processor_id()) { pr_debug("launch: I am cpu%d!\n", cpu); return; } launch += cpu; pr_debug("launch: starting cpu%d\n", cpu); launch->pc = pc; launch->gp = gp; launch->sp = sp; launch->a0 = a0; smp_wmb(); /* Target must see parameters before go */ launch->flags |= LAUNCH_FGO; smp_wmb(); /* Target must see go before we poll */ while ((launch->flags & LAUNCH_FGONE) == 0) ; smp_rmb(); /* Target will be updating flags soon */ pr_debug("launch: cpu%d gone!\n", cpu); }
gpl-2.0
wzhy90/android_kernel_sony_msm8974ab
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_qmath.c
10002
8107
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "phy_qmath.h" /* * Description: This function make 16 bit unsigned multiplication. * To fit the output into 16 bits the 32 bit multiplication result is right * shifted by 16 bits. */ u16 qm_mulu16(u16 op1, u16 op2) { return (u16) (((u32) op1 * (u32) op2) >> 16); } /* * Description: This function make 16 bit multiplication and return the result * in 16 bits. To fit the multiplication result into 16 bits the multiplication * result is right shifted by 15 bits. Right shifting 15 bits instead of 16 bits * is done to remove the extra sign bit formed due to the multiplication. * When both the 16bit inputs are 0x8000 then the output is saturated to * 0x7fffffff. */ s16 qm_muls16(s16 op1, s16 op2) { s32 result; if (op1 == (s16) 0x8000 && op2 == (s16) 0x8000) result = 0x7fffffff; else result = ((s32) (op1) * (s32) (op2)); return (s16) (result >> 15); } /* * Description: This function add two 32 bit numbers and return the 32bit * result. If the result overflow 32 bits, the output will be saturated to * 32bits. 
*/ s32 qm_add32(s32 op1, s32 op2) { s32 result; result = op1 + op2; if (op1 < 0 && op2 < 0 && result > 0) result = 0x80000000; else if (op1 > 0 && op2 > 0 && result < 0) result = 0x7fffffff; return result; } /* * Description: This function add two 16 bit numbers and return the 16bit * result. If the result overflow 16 bits, the output will be saturated to * 16bits. */ s16 qm_add16(s16 op1, s16 op2) { s16 result; s32 temp = (s32) op1 + (s32) op2; if (temp > (s32) 0x7fff) result = (s16) 0x7fff; else if (temp < (s32) 0xffff8000) result = (s16) 0xffff8000; else result = (s16) temp; return result; } /* * Description: This function make 16 bit subtraction and return the 16bit * result. If the result overflow 16 bits, the output will be saturated to * 16bits. */ s16 qm_sub16(s16 op1, s16 op2) { s16 result; s32 temp = (s32) op1 - (s32) op2; if (temp > (s32) 0x7fff) result = (s16) 0x7fff; else if (temp < (s32) 0xffff8000) result = (s16) 0xffff8000; else result = (s16) temp; return result; } /* * Description: This function make a 32 bit saturated left shift when the * specified shift is +ve. This function will make a 32 bit right shift when * the specified shift is -ve. This function return the result after shifting * operation. */ s32 qm_shl32(s32 op, int shift) { int i; s32 result; result = op; if (shift > 31) shift = 31; else if (shift < -31) shift = -31; if (shift >= 0) { for (i = 0; i < shift; i++) result = qm_add32(result, result); } else { result = result >> (-shift); } return result; } /* * Description: This function make a 16 bit saturated left shift when the * specified shift is +ve. This function will make a 16 bit right shift when * the specified shift is -ve. This function return the result after shifting * operation. 
*/ s16 qm_shl16(s16 op, int shift) { int i; s16 result; result = op; if (shift > 15) shift = 15; else if (shift < -15) shift = -15; if (shift > 0) { for (i = 0; i < shift; i++) result = qm_add16(result, result); } else { result = result >> (-shift); } return result; } /* * Description: This function make a 16 bit right shift when shift is +ve. * This function make a 16 bit saturated left shift when shift is -ve. This * function return the result of the shift operation. */ s16 qm_shr16(s16 op, int shift) { return qm_shl16(op, -shift); } /* * Description: This function return the number of redundant sign bits in a * 32 bit number. Example: qm_norm32(0x00000080) = 23 */ s16 qm_norm32(s32 op) { u16 u16extraSignBits; if (op == 0) { return 31; } else { u16extraSignBits = 0; while ((op >> 31) == (op >> 30)) { u16extraSignBits++; op = op << 1; } } return u16extraSignBits; } /* This table is log2(1+(i/32)) where i=[0:1:31], in q.15 format */ static const s16 log_table[] = { 0, 1455, 2866, 4236, 5568, 6863, 8124, 9352, 10549, 11716, 12855, 13968, 15055, 16117, 17156, 18173, 19168, 20143, 21098, 22034, 22952, 23852, 24736, 25604, 26455, 27292, 28114, 28922, 29717, 30498, 31267, 32024 }; #define LOG_TABLE_SIZE 32 /* log_table size */ #define LOG2_LOG_TABLE_SIZE 5 /* log2(log_table size) */ #define Q_LOG_TABLE 15 /* qformat of log_table */ #define LOG10_2 19728 /* log10(2) in q.16 */ /* * Description: * This routine takes the input number N and its q format qN and compute * the log10(N). This routine first normalizes the input no N. Then N is in * mag*(2^x) format. mag is any number in the range 2^30-(2^31 - 1). * Then log2(mag * 2^x) = log2(mag) + x is computed. From that * log10(mag * 2^x) = log2(mag * 2^x) * log10(2) is computed. * This routine looks the log2 value in the table considering * LOG2_LOG_TABLE_SIZE+1 MSBs. As the MSB is always 1, only next * LOG2_OF_LOG_TABLE_SIZE MSBs are used for table lookup. Next 16 MSBs are used * for interpolation. 
* Inputs: * N - number to which log10 has to be found. * qN - q format of N * log10N - address where log10(N) will be written. * qLog10N - address where log10N qformat will be written. * Note/Problem: * For accurate results input should be in normalized or near normalized form. */ void qm_log10(s32 N, s16 qN, s16 *log10N, s16 *qLog10N) { s16 s16norm, s16tableIndex, s16errorApproximation; u16 u16offset; s32 s32log; /* normalize the N. */ s16norm = qm_norm32(N); N = N << s16norm; /* The qformat of N after normalization. * -30 is added to treat the no as between 1.0 to 2.0 * i.e. after adding the -30 to the qformat the decimal point will be * just rigtht of the MSB. (i.e. after sign bit and 1st MSB). i.e. * at the right side of 30th bit. */ qN = qN + s16norm - 30; /* take the table index as the LOG2_OF_LOG_TABLE_SIZE bits right of the * MSB */ s16tableIndex = (s16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE))); /* remove the MSB. the MSB is always 1 after normalization. */ s16tableIndex = s16tableIndex & (s16) ((1 << LOG2_LOG_TABLE_SIZE) - 1); /* remove the (1+LOG2_OF_LOG_TABLE_SIZE) MSBs in the N. */ N = N & ((1 << (32 - (2 + LOG2_LOG_TABLE_SIZE))) - 1); /* take the offset as the 16 MSBS after table index. */ u16offset = (u16) (N >> (32 - (2 + LOG2_LOG_TABLE_SIZE + 16))); /* look the log value in the table. */ s32log = log_table[s16tableIndex]; /* q.15 format */ /* interpolate using the offset. q.15 format. */ s16errorApproximation = (s16) qm_mulu16(u16offset, (u16) (log_table[s16tableIndex + 1] - log_table[s16tableIndex])); /* q.15 format */ s32log = qm_add16((s16) s32log, s16errorApproximation); /* adjust for the qformat of the N as * log2(mag * 2^x) = log2(mag) + x */ s32log = qm_add32(s32log, ((s32) -qN) << 15); /* q.15 format */ /* normalize the result. */ s16norm = qm_norm32(s32log); /* bring all the important bits into lower 16 bits */ /* q.15+s16norm-16 format */ s32log = qm_shl32(s32log, s16norm - 16); /* compute the log10(N) by multiplying log2(N) with log10(2). 
* as log10(mag * 2^x) = log2(mag * 2^x) * log10(2) * log10N in q.15+s16norm-16+1 (LOG10_2 is in q.16) */ *log10N = qm_muls16((s16) s32log, (s16) LOG10_2); /* write the q format of the result. */ *qLog10N = 15 + s16norm - 16 + 1; return; }
gpl-2.0
genopublic/kernel_u8800
drivers/mtd/maps/map_funcs.c
14866
1078
/* * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS * is enabled. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/map.h> #include <linux/mtd/xip.h> static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs) { return inline_map_read(map, ofs); } static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs) { inline_map_write(map, datum, ofs); } static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { inline_map_copy_from(map, to, from, len); } static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { inline_map_copy_to(map, to, from, len); } void simple_map_init(struct map_info *map) { BUG_ON(!map_bankwidth_supported(map->bankwidth)); map->read = simple_map_read; map->write = simple_map_write; map->copy_from = simple_map_copy_from; map->copy_to = simple_map_copy_to; } EXPORT_SYMBOL(simple_map_init); MODULE_LICENSE("GPL");
gpl-2.0
petterreinholdtsen/cinelerra-hv
quicktime/thirdparty/ffmpeg-0.6.1/libavcodec/x86/vp3dsp_mmx.c
19
17244
/* * Copyright (C) 2004 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * MMX-optimized functions cribbed from the original VP3 source code. */ #include "libavutil/x86_cpu.h" #include "libavcodec/dsputil.h" #include "dsputil_mmx.h" #include "vp3dsp_mmx.h" extern const uint16_t ff_vp3_idct_data[]; // this is off by one or two for some cases when filter_limit is greater than 63 // in: p0 in mm6, p1 in mm4, p2 in mm2, p3 in mm1 // out: p1 in mm4, p2 in mm3 #define VP3_LOOP_FILTER(flim) \ "movq %%mm6, %%mm7 \n\t" \ "pand "MANGLE(ff_pb_7 )", %%mm6 \n\t" /* p0&7 */ \ "psrlw $3, %%mm7 \n\t" \ "pand "MANGLE(ff_pb_1F)", %%mm7 \n\t" /* p0>>3 */ \ "movq %%mm2, %%mm3 \n\t" /* mm3 = p2 */ \ "pxor %%mm4, %%mm2 \n\t" \ "pand "MANGLE(ff_pb_1 )", %%mm2 \n\t" /* (p2^p1)&1 */ \ "movq %%mm2, %%mm5 \n\t" \ "paddb %%mm2, %%mm2 \n\t" \ "paddb %%mm5, %%mm2 \n\t" /* 3*(p2^p1)&1 */ \ "paddb %%mm6, %%mm2 \n\t" /* extra bits lost in shifts */ \ "pcmpeqb %%mm0, %%mm0 \n\t" \ "pxor %%mm0, %%mm1 \n\t" /* 255 - p3 */ \ "pavgb %%mm2, %%mm1 \n\t" /* (256 - p3 + extrabits) >> 1 */ \ "pxor %%mm4, %%mm0 \n\t" /* 255 - p1 */ \ "pavgb %%mm3, %%mm0 \n\t" /* (256 + p2-p1) >> 1 */ \ "paddb "MANGLE(ff_pb_3 )", %%mm1 \n\t" \ "pavgb %%mm0, %%mm1 \n\t" /* 128+2+( p2-p1 - p3) >> 2 */ \ "pavgb 
%%mm0, %%mm1 \n\t" /* 128+1+(3*(p2-p1) - p3) >> 3 */ \ "paddusb %%mm1, %%mm7 \n\t" /* d+128+1 */ \ "movq "MANGLE(ff_pb_81)", %%mm6 \n\t" \ "psubusb %%mm7, %%mm6 \n\t" \ "psubusb "MANGLE(ff_pb_81)", %%mm7 \n\t" \ \ "movq "#flim", %%mm5 \n\t" \ "pminub %%mm5, %%mm6 \n\t" \ "pminub %%mm5, %%mm7 \n\t" \ "movq %%mm6, %%mm0 \n\t" \ "movq %%mm7, %%mm1 \n\t" \ "paddb %%mm6, %%mm6 \n\t" \ "paddb %%mm7, %%mm7 \n\t" \ "pminub %%mm5, %%mm6 \n\t" \ "pminub %%mm5, %%mm7 \n\t" \ "psubb %%mm0, %%mm6 \n\t" \ "psubb %%mm1, %%mm7 \n\t" \ "paddusb %%mm7, %%mm4 \n\t" \ "psubusb %%mm6, %%mm4 \n\t" \ "psubusb %%mm7, %%mm3 \n\t" \ "paddusb %%mm6, %%mm3 \n\t" #define STORE_4_WORDS(dst0, dst1, dst2, dst3, mm) \ "movd "#mm", %0 \n\t" \ "movw %w0, -1"#dst0" \n\t" \ "psrlq $32, "#mm" \n\t" \ "shr $16, %0 \n\t" \ "movw %w0, -1"#dst1" \n\t" \ "movd "#mm", %0 \n\t" \ "movw %w0, -1"#dst2" \n\t" \ "shr $16, %0 \n\t" \ "movw %w0, -1"#dst3" \n\t" void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values) { __asm__ volatile( "movq %0, %%mm6 \n\t" "movq %1, %%mm4 \n\t" "movq %2, %%mm2 \n\t" "movq %3, %%mm1 \n\t" VP3_LOOP_FILTER(%4) "movq %%mm4, %1 \n\t" "movq %%mm3, %2 \n\t" : "+m" (*(uint64_t*)(src - 2*stride)), "+m" (*(uint64_t*)(src - 1*stride)), "+m" (*(uint64_t*)(src + 0*stride)), "+m" (*(uint64_t*)(src + 1*stride)) : "m"(*(uint64_t*)(bounding_values+129)) ); } void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values) { x86_reg tmp; __asm__ volatile( "movd -2(%1), %%mm6 \n\t" "movd -2(%1,%3), %%mm0 \n\t" "movd -2(%1,%3,2), %%mm1 \n\t" "movd -2(%1,%4), %%mm4 \n\t" TRANSPOSE8x4(%%mm6, %%mm0, %%mm1, %%mm4, -2(%2), -2(%2,%3), -2(%2,%3,2), -2(%2,%4), %%mm2) VP3_LOOP_FILTER(%5) SBUTTERFLY(%%mm4, %%mm3, %%mm5, bw, q) STORE_4_WORDS((%1), (%1,%3), (%1,%3,2), (%1,%4), %%mm4) STORE_4_WORDS((%2), (%2,%3), (%2,%3,2), (%2,%4), %%mm5) : "=&r"(tmp) : "r"(src), "r"(src+4*stride), "r"((x86_reg)stride), "r"((x86_reg)3*stride), "m"(*(uint64_t*)(bounding_values+129)) : "memory" 
); } /* from original comments: The Macro does IDct on 4 1-D Dcts */ #define BeginIDCT() \ "movq "I(3)", %%mm2 \n\t" \ "movq "C(3)", %%mm6 \n\t" \ "movq %%mm2, %%mm4 \n\t" \ "movq "J(5)", %%mm7 \n\t" \ "pmulhw %%mm6, %%mm4 \n\t" /* r4 = c3*i3 - i3 */ \ "movq "C(5)", %%mm1 \n\t" \ "pmulhw %%mm7, %%mm6 \n\t" /* r6 = c3*i5 - i5 */ \ "movq %%mm1, %%mm5 \n\t" \ "pmulhw %%mm2, %%mm1 \n\t" /* r1 = c5*i3 - i3 */ \ "movq "I(1)", %%mm3 \n\t" \ "pmulhw %%mm7, %%mm5 \n\t" /* r5 = c5*i5 - i5 */ \ "movq "C(1)", %%mm0 \n\t" \ "paddw %%mm2, %%mm4 \n\t" /* r4 = c3*i3 */ \ "paddw %%mm7, %%mm6 \n\t" /* r6 = c3*i5 */ \ "paddw %%mm1, %%mm2 \n\t" /* r2 = c5*i3 */ \ "movq "J(7)", %%mm1 \n\t" \ "paddw %%mm5, %%mm7 \n\t" /* r7 = c5*i5 */ \ "movq %%mm0, %%mm5 \n\t" /* r5 = c1 */ \ "pmulhw %%mm3, %%mm0 \n\t" /* r0 = c1*i1 - i1 */ \ "paddsw %%mm7, %%mm4 \n\t" /* r4 = C = c3*i3 + c5*i5 */ \ "pmulhw %%mm1, %%mm5 \n\t" /* r5 = c1*i7 - i7 */ \ "movq "C(7)", %%mm7 \n\t" \ "psubsw %%mm2, %%mm6 \n\t" /* r6 = D = c3*i5 - c5*i3 */ \ "paddw %%mm3, %%mm0 \n\t" /* r0 = c1*i1 */ \ "pmulhw %%mm7, %%mm3 \n\t" /* r3 = c7*i1 */ \ "movq "I(2)", %%mm2 \n\t" \ "pmulhw %%mm1, %%mm7 \n\t" /* r7 = c7*i7 */ \ "paddw %%mm1, %%mm5 \n\t" /* r5 = c1*i7 */ \ "movq %%mm2, %%mm1 \n\t" /* r1 = i2 */ \ "pmulhw "C(2)", %%mm2 \n\t" /* r2 = c2*i2 - i2 */ \ "psubsw %%mm5, %%mm3 \n\t" /* r3 = B = c7*i1 - c1*i7 */ \ "movq "J(6)", %%mm5 \n\t" \ "paddsw %%mm7, %%mm0 \n\t" /* r0 = A = c1*i1 + c7*i7 */ \ "movq %%mm5, %%mm7 \n\t" /* r7 = i6 */ \ "psubsw %%mm4, %%mm0 \n\t" /* r0 = A - C */ \ "pmulhw "C(2)", %%mm5 \n\t" /* r5 = c2*i6 - i6 */ \ "paddw %%mm1, %%mm2 \n\t" /* r2 = c2*i2 */ \ "pmulhw "C(6)", %%mm1 \n\t" /* r1 = c6*i2 */ \ "paddsw %%mm4, %%mm4 \n\t" /* r4 = C + C */ \ "paddsw %%mm0, %%mm4 \n\t" /* r4 = C. 
= A + C */ \ "psubsw %%mm6, %%mm3 \n\t" /* r3 = B - D */ \ "paddw %%mm7, %%mm5 \n\t" /* r5 = c2*i6 */ \ "paddsw %%mm6, %%mm6 \n\t" /* r6 = D + D */ \ "pmulhw "C(6)", %%mm7 \n\t" /* r7 = c6*i6 */ \ "paddsw %%mm3, %%mm6 \n\t" /* r6 = D. = B + D */ \ "movq %%mm4, "I(1)"\n\t" /* save C. at I(1) */ \ "psubsw %%mm5, %%mm1 \n\t" /* r1 = H = c6*i2 - c2*i6 */ \ "movq "C(4)", %%mm4 \n\t" \ "movq %%mm3, %%mm5 \n\t" /* r5 = B - D */ \ "pmulhw %%mm4, %%mm3 \n\t" /* r3 = (c4 - 1) * (B - D) */ \ "paddsw %%mm2, %%mm7 \n\t" /* r3 = (c4 - 1) * (B - D) */ \ "movq %%mm6, "I(2)"\n\t" /* save D. at I(2) */ \ "movq %%mm0, %%mm2 \n\t" /* r2 = A - C */ \ "movq "I(0)", %%mm6 \n\t" \ "pmulhw %%mm4, %%mm0 \n\t" /* r0 = (c4 - 1) * (A - C) */ \ "paddw %%mm3, %%mm5 \n\t" /* r5 = B. = c4 * (B - D) */ \ "movq "J(4)", %%mm3 \n\t" \ "psubsw %%mm1, %%mm5 \n\t" /* r5 = B.. = B. - H */ \ "paddw %%mm0, %%mm2 \n\t" /* r0 = A. = c4 * (A - C) */ \ "psubsw %%mm3, %%mm6 \n\t" /* r6 = i0 - i4 */ \ "movq %%mm6, %%mm0 \n\t" \ "pmulhw %%mm4, %%mm6 \n\t" /* r6 = (c4 - 1) * (i0 - i4) */ \ "paddsw %%mm3, %%mm3 \n\t" /* r3 = i4 + i4 */ \ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H + H */ \ "paddsw %%mm0, %%mm3 \n\t" /* r3 = i0 + i4 */ \ "paddsw %%mm5, %%mm1 \n\t" /* r1 = H. = B + H */ \ "pmulhw %%mm3, %%mm4 \n\t" /* r4 = (c4 - 1) * (i0 + i4) */ \ "paddsw %%mm0, %%mm6 \n\t" /* r6 = F = c4 * (i0 - i4) */ \ "psubsw %%mm2, %%mm6 \n\t" /* r6 = F. = F - A. */ \ "paddsw %%mm2, %%mm2 \n\t" /* r2 = A. + A. */ \ "movq "I(1)", %%mm0 \n\t" /* r0 = C. */ \ "paddsw %%mm6, %%mm2 \n\t" /* r2 = A.. = F + A. */ \ "paddw %%mm3, %%mm4 \n\t" /* r4 = E = c4 * (i0 + i4) */ \ "psubsw %%mm1, %%mm2 \n\t" /* r2 = R2 = A.. - H. */ /* RowIDCT gets ready to transpose */ #define RowIDCT() \ BeginIDCT() \ "movq "I(2)", %%mm3 \n\t" /* r3 = D. */ \ "psubsw %%mm7, %%mm4 \n\t" /* r4 = E. = E - G */ \ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H. + H. */ \ "paddsw %%mm7, %%mm7 \n\t" /* r7 = G + G */ \ "paddsw %%mm2, %%mm1 \n\t" /* r1 = R1 = A.. + H. 
*/ \ "paddsw %%mm4, %%mm7 \n\t" /* r1 = R1 = A.. + H. */ \ "psubsw %%mm3, %%mm4 \n\t" /* r4 = R4 = E. - D. */ \ "paddsw %%mm3, %%mm3 \n\t" \ "psubsw %%mm5, %%mm6 \n\t" /* r6 = R6 = F. - B.. */ \ "paddsw %%mm5, %%mm5 \n\t" \ "paddsw %%mm4, %%mm3 \n\t" /* r3 = R3 = E. + D. */ \ "paddsw %%mm6, %%mm5 \n\t" /* r5 = R5 = F. + B.. */ \ "psubsw %%mm0, %%mm7 \n\t" /* r7 = R7 = G. - C. */ \ "paddsw %%mm0, %%mm0 \n\t" \ "movq %%mm1, "I(1)"\n\t" /* save R1 */ \ "paddsw %%mm7, %%mm0 \n\t" /* r0 = R0 = G. + C. */ /* Column IDCT normalizes and stores final results */ #define ColumnIDCT() \ BeginIDCT() \ "paddsw "OC_8", %%mm2 \n\t" /* adjust R2 (and R1) for shift */ \ "paddsw %%mm1, %%mm1 \n\t" /* r1 = H. + H. */ \ "paddsw %%mm2, %%mm1 \n\t" /* r1 = R1 = A.. + H. */ \ "psraw $4, %%mm2 \n\t" /* r2 = NR2 */ \ "psubsw %%mm7, %%mm4 \n\t" /* r4 = E. = E - G */ \ "psraw $4, %%mm1 \n\t" /* r1 = NR1 */ \ "movq "I(2)", %%mm3 \n\t" /* r3 = D. */ \ "paddsw %%mm7, %%mm7 \n\t" /* r7 = G + G */ \ "movq %%mm2, "I(2)"\n\t" /* store NR2 at I2 */ \ "paddsw %%mm4, %%mm7 \n\t" /* r7 = G. = E + G */ \ "movq %%mm1, "I(1)"\n\t" /* store NR1 at I1 */ \ "psubsw %%mm3, %%mm4 \n\t" /* r4 = R4 = E. - D. */ \ "paddsw "OC_8", %%mm4 \n\t" /* adjust R4 (and R3) for shift */ \ "paddsw %%mm3, %%mm3 \n\t" /* r3 = D. + D. */ \ "paddsw %%mm4, %%mm3 \n\t" /* r3 = R3 = E. + D. */ \ "psraw $4, %%mm4 \n\t" /* r4 = NR4 */ \ "psubsw %%mm5, %%mm6 \n\t" /* r6 = R6 = F. - B.. */ \ "psraw $4, %%mm3 \n\t" /* r3 = NR3 */ \ "paddsw "OC_8", %%mm6 \n\t" /* adjust R6 (and R5) for shift */ \ "paddsw %%mm5, %%mm5 \n\t" /* r5 = B.. + B.. */ \ "paddsw %%mm6, %%mm5 \n\t" /* r5 = R5 = F. + B.. */ \ "psraw $4, %%mm6 \n\t" /* r6 = NR6 */ \ "movq %%mm4, "J(4)"\n\t" /* store NR4 at J4 */ \ "psraw $4, %%mm5 \n\t" /* r5 = NR5 */ \ "movq %%mm3, "I(3)"\n\t" /* store NR3 at I3 */ \ "psubsw %%mm0, %%mm7 \n\t" /* r7 = R7 = G. - C. */ \ "paddsw "OC_8", %%mm7 \n\t" /* adjust R7 (and R0) for shift */ \ "paddsw %%mm0, %%mm0 \n\t" /* r0 = C. + C. 
*/ \ "paddsw %%mm7, %%mm0 \n\t" /* r0 = R0 = G. + C. */ \ "psraw $4, %%mm7 \n\t" /* r7 = NR7 */ \ "movq %%mm6, "J(6)"\n\t" /* store NR6 at J6 */ \ "psraw $4, %%mm0 \n\t" /* r0 = NR0 */ \ "movq %%mm5, "J(5)"\n\t" /* store NR5 at J5 */ \ "movq %%mm7, "J(7)"\n\t" /* store NR7 at J7 */ \ "movq %%mm0, "I(0)"\n\t" /* store NR0 at I0 */ /* Following macro does two 4x4 transposes in place. At entry (we assume): r0 = a3 a2 a1 a0 I(1) = b3 b2 b1 b0 r2 = c3 c2 c1 c0 r3 = d3 d2 d1 d0 r4 = e3 e2 e1 e0 r5 = f3 f2 f1 f0 r6 = g3 g2 g1 g0 r7 = h3 h2 h1 h0 At exit, we have: I(0) = d0 c0 b0 a0 I(1) = d1 c1 b1 a1 I(2) = d2 c2 b2 a2 I(3) = d3 c3 b3 a3 J(4) = h0 g0 f0 e0 J(5) = h1 g1 f1 e1 J(6) = h2 g2 f2 e2 J(7) = h3 g3 f3 e3 I(0) I(1) I(2) I(3) is the transpose of r0 I(1) r2 r3. J(4) J(5) J(6) J(7) is the transpose of r4 r5 r6 r7. Since r1 is free at entry, we calculate the Js first. */ #define Transpose() \ "movq %%mm4, %%mm1 \n\t" /* r1 = e3 e2 e1 e0 */ \ "punpcklwd %%mm5, %%mm4 \n\t" /* r4 = f1 e1 f0 e0 */ \ "movq %%mm0, "I(0)"\n\t" /* save a3 a2 a1 a0 */ \ "punpckhwd %%mm5, %%mm1 \n\t" /* r1 = f3 e3 f2 e2 */ \ "movq %%mm6, %%mm0 \n\t" /* r0 = g3 g2 g1 g0 */ \ "punpcklwd %%mm7, %%mm6 \n\t" /* r6 = h1 g1 h0 g0 */ \ "movq %%mm4, %%mm5 \n\t" /* r5 = f1 e1 f0 e0 */ \ "punpckldq %%mm6, %%mm4 \n\t" /* r4 = h0 g0 f0 e0 = R4 */ \ "punpckhdq %%mm6, %%mm5 \n\t" /* r5 = h1 g1 f1 e1 = R5 */ \ "movq %%mm1, %%mm6 \n\t" /* r6 = f3 e3 f2 e2 */ \ "movq %%mm4, "J(4)"\n\t" \ "punpckhwd %%mm7, %%mm0 \n\t" /* r0 = h3 g3 h2 g2 */ \ "movq %%mm5, "J(5)"\n\t" \ "punpckhdq %%mm0, %%mm6 \n\t" /* r6 = h3 g3 f3 e3 = R7 */ \ "movq "I(0)", %%mm4 \n\t" /* r4 = a3 a2 a1 a0 */ \ "punpckldq %%mm0, %%mm1 \n\t" /* r1 = h2 g2 f2 e2 = R6 */ \ "movq "I(1)", %%mm5 \n\t" /* r5 = b3 b2 b1 b0 */ \ "movq %%mm4, %%mm0 \n\t" /* r0 = a3 a2 a1 a0 */ \ "movq %%mm6, "J(7)"\n\t" \ "punpcklwd %%mm5, %%mm0 \n\t" /* r0 = b1 a1 b0 a0 */ \ "movq %%mm1, "J(6)"\n\t" \ "punpckhwd %%mm5, %%mm4 \n\t" /* r4 = b3 a3 b2 a2 */ \ "movq %%mm2, 
%%mm5 \n\t" /* r5 = c3 c2 c1 c0 */ \ "punpcklwd %%mm3, %%mm2 \n\t" /* r2 = d1 c1 d0 c0 */ \ "movq %%mm0, %%mm1 \n\t" /* r1 = b1 a1 b0 a0 */ \ "punpckldq %%mm2, %%mm0 \n\t" /* r0 = d0 c0 b0 a0 = R0 */ \ "punpckhdq %%mm2, %%mm1 \n\t" /* r1 = d1 c1 b1 a1 = R1 */ \ "movq %%mm4, %%mm2 \n\t" /* r2 = b3 a3 b2 a2 */ \ "movq %%mm0, "I(0)"\n\t" \ "punpckhwd %%mm3, %%mm5 \n\t" /* r5 = d3 c3 d2 c2 */ \ "movq %%mm1, "I(1)"\n\t" \ "punpckhdq %%mm5, %%mm4 \n\t" /* r4 = d3 c3 b3 a3 = R3 */ \ "punpckldq %%mm5, %%mm2 \n\t" /* r2 = d2 c2 b2 a2 = R2 */ \ "movq %%mm4, "I(3)"\n\t" \ "movq %%mm2, "I(2)"\n\t" void ff_vp3_idct_mmx(int16_t *output_data) { /* eax = quantized input * ebx = dequantizer matrix * ecx = IDCT constants * M(I) = ecx + MaskOffset(0) + I * 8 * C(I) = ecx + CosineOffset(32) + (I-1) * 8 * edx = output * r0..r7 = mm0..mm7 */ #define C(x) AV_STRINGIFY(16*(x-1))"(%1)" #define OC_8 "%2" /* at this point, function has completed dequantization + dezigzag + * partial transposition; now do the idct itself */ #define I(x) AV_STRINGIFY(16* x )"(%0)" #define J(x) AV_STRINGIFY(16*(x-4) + 8)"(%0)" __asm__ volatile ( RowIDCT() Transpose() #undef I #undef J #define I(x) AV_STRINGIFY(16* x + 64)"(%0)" #define J(x) AV_STRINGIFY(16*(x-4) + 72)"(%0)" RowIDCT() Transpose() #undef I #undef J #define I(x) AV_STRINGIFY(16*x)"(%0)" #define J(x) AV_STRINGIFY(16*x)"(%0)" ColumnIDCT() #undef I #undef J #define I(x) AV_STRINGIFY(16*x + 8)"(%0)" #define J(x) AV_STRINGIFY(16*x + 8)"(%0)" ColumnIDCT() :: "r"(output_data), "r"(ff_vp3_idct_data), "m"(ff_pw_8) ); #undef I #undef J } void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block) { ff_vp3_idct_mmx(block); put_signed_pixels_clamped_mmx(block, dest, line_size); } void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block) { ff_vp3_idct_mmx(block); add_pixels_clamped_mmx(block, dest, line_size); } void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int linesize, const DCTELEM *block) { int dc = block[0]; dc = (46341*dc)>>16; dc = 
(46341*dc + (8<<16))>>20; __asm__ volatile( "movd %3, %%mm0 \n\t" "pshufw $0, %%mm0, %%mm0 \n\t" "pxor %%mm1, %%mm1 \n\t" "psubw %%mm0, %%mm1 \n\t" "packuswb %%mm0, %%mm0 \n\t" "packuswb %%mm1, %%mm1 \n\t" #define DC_ADD \ "movq (%0), %%mm2 \n\t" \ "movq (%0,%1), %%mm3 \n\t" \ "paddusb %%mm0, %%mm2 \n\t" \ "movq (%0,%1,2), %%mm4 \n\t" \ "paddusb %%mm0, %%mm3 \n\t" \ "movq (%0,%2), %%mm5 \n\t" \ "paddusb %%mm0, %%mm4 \n\t" \ "paddusb %%mm0, %%mm5 \n\t" \ "psubusb %%mm1, %%mm2 \n\t" \ "psubusb %%mm1, %%mm3 \n\t" \ "movq %%mm2, (%0) \n\t" \ "psubusb %%mm1, %%mm4 \n\t" \ "movq %%mm3, (%0,%1) \n\t" \ "psubusb %%mm1, %%mm5 \n\t" \ "movq %%mm4, (%0,%1,2) \n\t" \ "movq %%mm5, (%0,%2) \n\t" DC_ADD "lea (%0,%1,4), %0 \n\t" DC_ADD : "+r"(dest) : "r"((x86_reg)linesize), "r"((x86_reg)3*linesize), "r"(dc) ); }
gpl-2.0
OTZIILORD/lavfilters
decoder/LAVVideo/pixconv/yuv420_yuy2.cpp
19
10901
/*
 * Copyright (C) 2010-2015 Hendrik Leppkes
 * http://www.1f0.de
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "stdafx.h"

#include <emmintrin.h>
#include <ppl.h>

#include "pixconv_internal.h"
#include "pixconv_sse2_templates.h"

#define DITHER_STEPS 2

// This function converts 8x2 pixels from the source into 8x2 YUY2 pixels in the destination
// srcY/srcU/srcV/dst are passed by reference and advanced past the consumed samples.
// line/pos select the dither coefficients; shift > 0 selects the 9..14-bit input paths.
template <LAVPixelFormat inputFormat, int shift, int uyvy, int dithertype>
__forceinline static int yuv420yuy2_convert_pixels(const uint8_t* &srcY, const uint8_t* &srcU, const uint8_t* &srcV, uint8_t* &dst, ptrdiff_t srcStrideY, ptrdiff_t srcStrideUV, ptrdiff_t dstStride, ptrdiff_t line, const uint16_t* &dithers, ptrdiff_t pos)
{
  __m128i xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7;
  xmm7 = _mm_setzero_si128 ();

  // Shift > 0 is for 9/10 bit formats
  if (shift > 0) {
    // Load 4 U/V values from line 0/1 into registers
    PIXCONV_LOAD_4PIXEL16(xmm1, srcU);
    PIXCONV_LOAD_4PIXEL16(xmm3, srcU+srcStrideUV);
    PIXCONV_LOAD_4PIXEL16(xmm0, srcV);
    PIXCONV_LOAD_4PIXEL16(xmm2, srcV+srcStrideUV);

    // Interleave U and V
    xmm0 = _mm_unpacklo_epi16(xmm1, xmm0);                       /* 0V0U0V0U */
    xmm2 = _mm_unpacklo_epi16(xmm3, xmm2);                       /* 0V0U0V0U */
  } else if (inputFormat == LAVPixFmt_NV12) {
    // Load 4 16-bit macro pixels, which contain 4 UV samples
    PIXCONV_LOAD_4PIXEL16(xmm0, srcU);
    PIXCONV_LOAD_4PIXEL16(xmm2, srcU+srcStrideUV);

    // Expand to 16-bit
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm7);                        /* 0V0U0V0U */
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm7);                        /* 0V0U0V0U */
  } else {
    PIXCONV_LOAD_4PIXEL8(xmm1, srcU);
    PIXCONV_LOAD_4PIXEL8(xmm3, srcU+srcStrideUV);
    PIXCONV_LOAD_4PIXEL8(xmm0, srcV);
    PIXCONV_LOAD_4PIXEL8(xmm2, srcV+srcStrideUV);

    // Interleave U and V
    xmm0 = _mm_unpacklo_epi8(xmm1, xmm0);                        /* VUVU0000 */
    xmm2 = _mm_unpacklo_epi8(xmm3, xmm2);                        /* VUVU0000 */

    // Expand to 16-bit
    xmm0 = _mm_unpacklo_epi8(xmm0, xmm7);                        /* 0V0U0V0U */
    xmm2 = _mm_unpacklo_epi8(xmm2, xmm7);                        /* 0V0U0V0U */
  }

  // xmm0/xmm2 contain 4 interleaved U/V samples from two lines each in the 16bit parts, still in their native bitdepth

  // Chroma upsampling: advance chroma pointers past the consumed samples
  if (shift > 0 || inputFormat == LAVPixFmt_NV12) {
    srcU += 8;
    srcV += 8;
  } else {
    srcU += 4;
    srcV += 4;
  }

  // 3:1 weighting of the two chroma lines towards each output line
  xmm1 = xmm0;
  xmm1 = _mm_add_epi16(xmm1, xmm0);                              /* 2x line 0 */
  xmm1 = _mm_add_epi16(xmm1, xmm0);                              /* 3x line 0 */
  xmm1 = _mm_add_epi16(xmm1, xmm2);                              /* 3x line 0 + line 1 (10bit) */

  xmm3 = xmm2;
  xmm3 = _mm_add_epi16(xmm3, xmm2);                              /* 2x line 1 */
  xmm3 = _mm_add_epi16(xmm3, xmm2);                              /* 3x line 1 */
  xmm3 = _mm_add_epi16(xmm3, xmm0);                              /* 3x line 1 + line 0 (10bit) */

  // After this step, xmm1 and xmm3 contain 8 16-bit values, V and U interleaved. For 4:2:0, filling input+2 bits (10, 11, 12).

  // Load Y
  if (shift > 0) {
    // Load 8 Y values from line 0/1 into registers
    PIXCONV_LOAD_PIXEL8_ALIGNED(xmm0, srcY);
    PIXCONV_LOAD_PIXEL8_ALIGNED(xmm5, srcY+srcStrideY);
    srcY += 16;
  } else {
    PIXCONV_LOAD_4PIXEL16(xmm0, srcY);
    PIXCONV_LOAD_4PIXEL16(xmm5, srcY+srcStrideY);
    srcY += 8;

    xmm0 = _mm_unpacklo_epi8(xmm0, xmm7);                        /* YYYYYYYY (16-bit fields)*/
    xmm5 = _mm_unpacklo_epi8(xmm5, xmm7);                        /* YYYYYYYY (16-bit fields) */
  }

  // Dither everything to 8-bit

  // Dithering
  if (dithertype == LAVDither_Random) {
    /* Load random dithering coeffs from the dithers buffer */
    int offset = (pos % (DITHER_STEPS * 8 * 2)) * 2;
    xmm6 = _mm_load_si128((const __m128i *)(dithers + 0 + offset));
    xmm7 = _mm_load_si128((const __m128i *)(dithers + 8 + offset));
  } else {
    PIXCONV_LOAD_DITHER_COEFFS(xmm6, line+0, shift+2, odithers);
    PIXCONV_LOAD_DITHER_COEFFS(xmm7, line+1, shift+2, odithers2);
  }

  // Dither UV
  xmm1 = _mm_adds_epu16(xmm1, xmm6);
  xmm3 = _mm_adds_epu16(xmm3, xmm7);
  xmm1 = _mm_srai_epi16(xmm1, shift+2);
  xmm3 = _mm_srai_epi16(xmm3, shift+2);

  if (shift) { /* Y only needs to be dithered if it was > 8 bit */
    xmm6 = _mm_srli_epi16(xmm6, 2); /* Shift dithering coeffs to proper strength */
    /* BUGFIX: was _mm_srli_epi16(xmm6, 2), which threw away the line-1 dither
     * coefficients in xmm7 and re-used a double-shifted copy of line 0's. */
    xmm7 = _mm_srli_epi16(xmm7, 2);

    xmm0 = _mm_adds_epu16(xmm0, xmm6);  /* Apply dithering coeffs */
    xmm0 = _mm_srai_epi16(xmm0, shift); /* Shift to 8 bit */

    xmm5 = _mm_adds_epu16(xmm5, xmm7);  /* Apply dithering coeffs */
    xmm5 = _mm_srai_epi16(xmm5, shift); /* Shift to 8 bit */
  }

  // Pack into 8-bit containers
  xmm0 = _mm_packus_epi16(xmm0, xmm5);
  xmm1 = _mm_packus_epi16(xmm1, xmm3);

  // Interleave U/V with Y (UYVY puts chroma first, YUY2 puts luma first)
  if (uyvy) {
    xmm3 = xmm1;
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm0);
    xmm4 = _mm_unpackhi_epi8(xmm1, xmm0);
  } else {
    xmm3 = xmm0;
    xmm3 = _mm_unpacklo_epi8(xmm3, xmm1);
    xmm4 = _mm_unpackhi_epi8(xmm0, xmm1);
  }

  // Write back into the target memory (non-temporal stores)
  _mm_stream_si128((__m128i *)(dst), xmm3);
  _mm_stream_si128((__m128i *)(dst + dstStride), xmm4);

  dst += 16;

  return 0;
}

template <LAVPixelFormat inputFormat, int shift, int uyvy, int
dithertype>
// Converts a whole YUV420/NV12 frame into packed YUY2/UYVY, two output lines
// per loop iteration via yuv420yuy2_convert_pixels.
// NOTE(review): loops step i by 8, which assumes width is a multiple of 8 --
// TODO confirm with callers.
static int __stdcall yuv420yuy2_process_lines(const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV, uint8_t *dst, int width, int height, ptrdiff_t srcStrideY, ptrdiff_t srcStrideUV, ptrdiff_t dstStride, const uint16_t *dithers)
{
  const uint8_t *y = srcY;
  const uint8_t *u = srcU;
  const uint8_t *v = srcV;
  uint8_t *yuy2 = dst;

  // Processing starts at line 1, and ends at height - 1. The first and last line have special handling
  ptrdiff_t line = 1;
  const ptrdiff_t lastLine = height - 1;

  const uint16_t *lineDither = dithers;

  // fence before the _mm_stream_si128 stores done by the pixel converter
  _mm_sfence();

  // Process first line
  // This needs special handling because of the chroma offset of YUV420
  // (strides of 0 make the converter read the same line twice)
  for (ptrdiff_t i = 0; i < width; i += 8) {
    yuv420yuy2_convert_pixels<inputFormat, shift, uyvy, dithertype>(y, u, v, yuy2, 0, 0, 0, 0, lineDither, i);
  }

  for (; line < lastLine; line += 2) {
    if (dithertype == LAVDither_Random)
      lineDither = dithers + (line * 16 * DITHER_STEPS);

    y = srcY + line * srcStrideY;
    u = srcU + (line >> 1) * srcStrideUV;
    v = srcV + (line >> 1) * srcStrideUV;
    yuy2 = dst + line * dstStride;

    for (int i = 0; i < width; i += 8) {
      yuv420yuy2_convert_pixels<inputFormat, shift, uyvy, dithertype>(y, u, v, yuy2, srcStrideY, srcStrideUV, dstStride, line, lineDither, i);
    }
  }

  // Process last line
  // This needs special handling because of the chroma offset of YUV420
  if (dithertype == LAVDither_Random)
    lineDither = dithers + ((height - 2) * 16 * DITHER_STEPS);

  y = srcY + (height - 1) * srcStrideY;
  u = srcU + ((height >> 1) - 1) * srcStrideUV;
  v = srcV + ((height >> 1) - 1) * srcStrideUV;
  yuy2 = dst + (height - 1) * dstStride;

  for (ptrdiff_t i = 0; i < width; i += 8) {
    yuv420yuy2_convert_pixels<inputFormat, shift, uyvy, dithertype>(y, u, v, yuy2, 0, 0, 0, line, lineDither, i);
  }
  return 0;
}

// Maps the runtime (inputFormat, bpp) pair onto the compile-time template
// parameters of yuv420yuy2_process_lines.
template<int uyvy, int dithertype>
static int __stdcall yuv420yuy2_dispatch(LAVPixelFormat inputFormat, int bpp, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV, uint8_t *dst, int width, int height, ptrdiff_t srcStrideY,
ptrdiff_t srcStrideUV, ptrdiff_t dstStride, const uint16_t *dithers)
{
  // Wrap the input format into template args
  switch (inputFormat) {
  case LAVPixFmt_YUV420:
    return yuv420yuy2_process_lines<LAVPixFmt_YUV420, 0, uyvy, dithertype>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, dithers);
  case LAVPixFmt_NV12:
    return yuv420yuy2_process_lines<LAVPixFmt_NV12, 0, uyvy, dithertype>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, dithers);
  case LAVPixFmt_YUV420bX:
    // template shift parameter is bpp - 8; odd depths 11/13 are disabled below
    if (bpp == 9)
      return yuv420yuy2_process_lines<LAVPixFmt_YUV420, 1, uyvy, dithertype>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, dithers);
    else if (bpp == 10)
      return yuv420yuy2_process_lines<LAVPixFmt_YUV420, 2, uyvy, dithertype>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, dithers);
    /*else if (bpp == 11)
      return yuv420yuy2_process_lines<LAVPixFmt_YUV420, 3, uyvy, dithertype>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, dithers);*/
    else if (bpp == 12)
      return yuv420yuy2_process_lines<LAVPixFmt_YUV420, 4, uyvy, dithertype>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, dithers);
    /*else if (bpp == 13)
      return yuv420yuy2_process_lines<LAVPixFmt_YUV420, 5, uyvy, dithertype>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, dithers);*/
    else if (bpp == 14)
      return yuv420yuy2_process_lines<LAVPixFmt_YUV420, 6, uyvy, dithertype>(srcY, srcU, srcV, dst, width, height, srcStrideY, srcStrideUV, dstStride, dithers);
    else
      ASSERT(0);
    break;
  default:
    ASSERT(0);
  }
  return 0;
}

// Converter entry point (uyvy selects YUY2 vs UYVY output ordering).
// Falls back to ordered dithering when random dither coeffs are unavailable.
template<int uyvy>
DECLARE_CONV_FUNC_IMPL(convert_yuv420_yuy2)
{
  LAVDitherMode ditherMode = m_pSettings->GetDitherMode();
  const uint16_t *dithers = (ditherMode == LAVDither_Random) ? GetRandomDitherCoeffs(height, DITHER_STEPS * 2, bpp - 8 + 2, 0) : nullptr;

  if (ditherMode == LAVDither_Random && dithers != nullptr) {
    yuv420yuy2_dispatch<uyvy, 1>(inputFormat, bpp, src[0], src[1], src[2], dst[0], width, height, srcStride[0], srcStride[1], dstStride[0], dithers);
  } else {
    yuv420yuy2_dispatch<uyvy, 0>(inputFormat, bpp, src[0], src[1], src[2], dst[0], width, height, srcStride[0], srcStride[1], dstStride[0], nullptr);
  }

  return S_OK;
}

// Force creation of these two variants
template HRESULT CLAVPixFmtConverter::convert_yuv420_yuy2<0>CONV_FUNC_PARAMS;
template HRESULT CLAVPixFmtConverter::convert_yuv420_yuy2<1>CONV_FUNC_PARAMS;
gpl-2.0
friedrich420/Note4-TMO-AELKernel
drivers/video/msm/mdss/mdss_dsi_panel.c
19
37854
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/gpio.h> #include <linux/qpnp/pin.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/qpnp/pwm.h> #include <linux/err.h> #include "mdss_dsi.h" #define DT_CMD_HDR 6 DEFINE_LED_TRIGGER(bl_led_trigger); void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl) { ctrl->pwm_bl = pwm_request(ctrl->pwm_lpg_chan, "lcd-bklt"); if (ctrl->pwm_bl == NULL || IS_ERR(ctrl->pwm_bl)) { pr_err("%s: Error: lpg_chan=%d pwm request failed", __func__, ctrl->pwm_lpg_chan); } } static void mdss_dsi_panel_bklt_pwm(struct mdss_dsi_ctrl_pdata *ctrl, int level) { int ret; u32 duty; if (ctrl->pwm_bl == NULL) { pr_err("%s: no PWM\n", __func__); return; } if (level == 0) { if (ctrl->pwm_enabled) pwm_disable(ctrl->pwm_bl); ctrl->pwm_enabled = 0; return; } duty = level * ctrl->pwm_period; duty /= ctrl->bklt_max; pr_debug("%s: bklt_ctrl=%d pwm_period=%d pwm_gpio=%d pwm_lpg_chan=%d\n", __func__, ctrl->bklt_ctrl, ctrl->pwm_period, ctrl->pwm_pmic_gpio, ctrl->pwm_lpg_chan); pr_debug("%s: ndx=%d level=%d duty=%d\n", __func__, ctrl->ndx, level, duty); if (ctrl->pwm_enabled) { pwm_disable(ctrl->pwm_bl); ctrl->pwm_enabled = 0; } ret = pwm_config_us(ctrl->pwm_bl, duty, ctrl->pwm_period); if (ret) { pr_err("%s: pwm_config_us() failed err=%d.\n", __func__, ret); return; } ret = pwm_enable(ctrl->pwm_bl); if (ret) pr_err("%s: pwm_enable() 
failed err=%d\n", __func__, ret); ctrl->pwm_enabled = 1; } static char dcs_cmd[2] = {0x54, 0x00}; /* DTYPE_DCS_READ */ static struct dsi_cmd_desc dcs_read_cmd = { {DTYPE_DCS_READ, 1, 0, 1, 5, sizeof(dcs_cmd)}, dcs_cmd }; u32 mdss_dsi_panel_cmd_read(struct mdss_dsi_ctrl_pdata *ctrl, char cmd0, char cmd1, void (*fxn)(int), char *rbuf, int len) { struct dcs_cmd_req cmdreq; struct mdss_panel_info *pinfo; pinfo = &(ctrl->panel_data.panel_info); if (pinfo->partial_update_dcs_cmd_by_left) { if (ctrl->ndx != DSI_CTRL_LEFT) return -EINVAL; } dcs_cmd[0] = cmd0; dcs_cmd[1] = cmd1; memset(&cmdreq, 0, sizeof(cmdreq)); cmdreq.cmds = &dcs_read_cmd; cmdreq.cmds_cnt = 1; cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT; cmdreq.rlen = len; cmdreq.rbuf = rbuf; cmdreq.cb = fxn; /* call back */ mdss_dsi_cmdlist_put(ctrl, &cmdreq); /* * blocked here, until call back called */ return 0; } static void mdss_dsi_panel_cmds_send(struct mdss_dsi_ctrl_pdata *ctrl, struct dsi_panel_cmds *pcmds) { struct dcs_cmd_req cmdreq; struct mdss_panel_info *pinfo; pinfo = &(ctrl->panel_data.panel_info); if (pinfo->partial_update_dcs_cmd_by_left) { if (ctrl->ndx != DSI_CTRL_LEFT) return; } memset(&cmdreq, 0, sizeof(cmdreq)); cmdreq.cmds = pcmds->cmds; cmdreq.cmds_cnt = pcmds->cmd_cnt; cmdreq.flags = CMD_REQ_COMMIT; /*Panel ON/Off commands should be sent in DSI Low Power Mode*/ if (pcmds->link_state == DSI_LP_MODE) cmdreq.flags |= CMD_REQ_LP_MODE; cmdreq.rlen = 0; cmdreq.cb = NULL; mdss_dsi_cmdlist_put(ctrl, &cmdreq); } static char led_pwm1[2] = {0x51, 0x0}; /* DTYPE_DCS_WRITE1 */ static struct dsi_cmd_desc backlight_cmd = { {DTYPE_DCS_WRITE1, 1, 0, 0, 1, sizeof(led_pwm1)}, led_pwm1 }; static void mdss_dsi_panel_bklt_dcs(struct mdss_dsi_ctrl_pdata *ctrl, int level) { struct dcs_cmd_req cmdreq; struct mdss_panel_info *pinfo; pinfo = &(ctrl->panel_data.panel_info); if (pinfo->partial_update_dcs_cmd_by_left) { if (ctrl->ndx != DSI_CTRL_LEFT) return; } pr_debug("%s: level=%d\n", __func__, level); led_pwm1[1] = 
(unsigned char)level; memset(&cmdreq, 0, sizeof(cmdreq)); cmdreq.cmds = &backlight_cmd; cmdreq.cmds_cnt = 1; cmdreq.flags = CMD_REQ_COMMIT | CMD_CLK_CTRL; cmdreq.rlen = 0; cmdreq.cb = NULL; mdss_dsi_cmdlist_put(ctrl, &cmdreq); } static int mdss_dsi_request_gpios(struct mdss_dsi_ctrl_pdata *ctrl_pdata) { int rc = 0; if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) { rc = gpio_request(ctrl_pdata->disp_en_gpio, "disp_enable"); if (rc) { pr_err("request disp_en gpio failed, rc=%d\n", rc); goto disp_en_gpio_err; } } rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n"); if (rc) { pr_err("request reset gpio failed, rc=%d\n", rc); goto rst_gpio_err; } if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) { rc = gpio_request(ctrl_pdata->bklt_en_gpio, "bklt_enable"); if (rc) { pr_err("request bklt gpio failed, rc=%d\n", rc); goto bklt_en_gpio_err; } } if (gpio_is_valid(ctrl_pdata->mode_gpio)) { rc = gpio_request(ctrl_pdata->mode_gpio, "panel_mode"); if (rc) { pr_err("request panel mode gpio failed,rc=%d\n", rc); goto mode_gpio_err; } } return rc; mode_gpio_err: if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) gpio_free(ctrl_pdata->bklt_en_gpio); bklt_en_gpio_err: gpio_free(ctrl_pdata->rst_gpio); rst_gpio_err: if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) gpio_free(ctrl_pdata->disp_en_gpio); disp_en_gpio_err: return rc; } int mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable) { struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; struct mdss_panel_info *pinfo = NULL; int i, rc = 0; if (pdata == NULL) { pr_err("%s: Invalid input data\n", __func__); return -EINVAL; } ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata, panel_data); if (!gpio_is_valid(ctrl_pdata->disp_en_gpio)) { pr_debug("%s:%d, reset line not configured\n", __func__, __LINE__); } if (!gpio_is_valid(ctrl_pdata->rst_gpio)) { pr_debug("%s:%d, reset line not configured\n", __func__, __LINE__); return rc; } pr_debug("%s: enable = %d\n", __func__, enable); pinfo = &(ctrl_pdata->panel_data.panel_info); if (enable) { rc 
= mdss_dsi_request_gpios(ctrl_pdata); if (rc) { pr_err("gpio request failed\n"); return rc; } if (!pinfo->cont_splash_enabled) { if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) gpio_set_value((ctrl_pdata->disp_en_gpio), 1); for (i = 0; i < pdata->panel_info.rst_seq_len; ++i) { gpio_set_value((ctrl_pdata->rst_gpio), pdata->panel_info.rst_seq[i]); if (pdata->panel_info.rst_seq[++i]) usleep(pinfo->rst_seq[i] * 1000); } if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) gpio_set_value((ctrl_pdata->bklt_en_gpio), 1); } if (gpio_is_valid(ctrl_pdata->mode_gpio)) { if (pinfo->mode_gpio_state == MODE_GPIO_HIGH) gpio_set_value((ctrl_pdata->mode_gpio), 1); else if (pinfo->mode_gpio_state == MODE_GPIO_LOW) gpio_set_value((ctrl_pdata->mode_gpio), 0); } if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT) { pr_debug("%s: Panel Not properly turned OFF\n", __func__); ctrl_pdata->ctrl_state &= ~CTRL_STATE_PANEL_INIT; pr_debug("%s: Reset panel done\n", __func__); } } else { if (gpio_is_valid(ctrl_pdata->bklt_en_gpio)) { gpio_set_value((ctrl_pdata->bklt_en_gpio), 0); gpio_free(ctrl_pdata->bklt_en_gpio); } if (gpio_is_valid(ctrl_pdata->disp_en_gpio)) { gpio_set_value((ctrl_pdata->disp_en_gpio), 0); gpio_free(ctrl_pdata->disp_en_gpio); } gpio_set_value((ctrl_pdata->rst_gpio), 0); gpio_free(ctrl_pdata->rst_gpio); if (gpio_is_valid(ctrl_pdata->mode_gpio)) gpio_free(ctrl_pdata->mode_gpio); } return rc; } /** * mdss_dsi_roi_merge() - merge two roi into single roi * * Function used by partial update with only one dsi intf take 2A/2B * (column/page) dcs commands. 
 */
static int mdss_dsi_roi_merge(struct mdss_dsi_ctrl_pdata *ctrl,
	struct mdss_rect *roi)
{
	struct mdss_panel_info *l_pinfo;
	struct mdss_rect *l_roi;
	struct mdss_rect *r_roi;
	struct mdss_dsi_ctrl_pdata *other = NULL;
	int ans = 0;

	/* resolve which ctrl is left/right so rois are merged left-to-right */
	if (ctrl->ndx == DSI_CTRL_LEFT) {
		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_RIGHT);
		if (!other)
			return ans;
		l_pinfo = &(ctrl->panel_data.panel_info);
		l_roi = &(ctrl->panel_data.panel_info.roi);
		r_roi = &(other->panel_data.panel_info.roi);
	} else {
		other = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);
		if (!other)
			return ans;
		l_pinfo = &(other->panel_data.panel_info);
		l_roi = &(other->panel_data.panel_info.roi);
		r_roi = &(ctrl->panel_data.panel_info.roi);
	}

	if (l_roi->w == 0 && l_roi->h == 0) {
		/* right only */
		*roi = *r_roi;
		roi->x += l_pinfo->xres;/* add left full width to x-offset */
	} else {
		/* left only and left+righ */
		*roi = *l_roi;
		roi->w +=  r_roi->w; /* add right width */
		ans = 1;
	}
	return ans;
}

static char caset[] = {0x2a, 0x00, 0x00, 0x03, 0x00};	/* DTYPE_DCS_LWRITE */
static char paset[] = {0x2b, 0x00, 0x00, 0x05, 0x00};	/* DTYPE_DCS_LWRITE */

/* pack into one frame before sent */
static struct dsi_cmd_desc set_col_page_addr_cmd[] = {
	{{DTYPE_DCS_LWRITE, 0, 0, 0, 1, sizeof(caset)}, caset},	/* packed */
	{{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(paset)}, paset},
};

/* Send DCS 2A (column) / 2B (page) address commands for the current roi. */
static int mdss_dsi_set_col_page_addr(struct mdss_panel_data *pdata)
{
	struct mdss_panel_info *pinfo;
	struct mdss_rect roi;
	struct mdss_rect *p_roi;
	struct mdss_rect *c_roi;
	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
	struct dcs_cmd_req cmdreq;
	int left_or_both = 0;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pinfo = &pdata->panel_info;
	p_roi = &pinfo->roi;
	c_roi = &ctrl->roi;

	/*
	 * if broadcase mode enable or roi had changed
	 * then do col_page update
	 */
	if (mdss_dsi_broadcast_mode_enabled() ||
			!mdss_rect_cmp(c_roi, p_roi)) {
		pr_debug("%s: ndx=%d x=%d y=%d w=%d h=%d\n",
				__func__, ctrl->ndx, p_roi->x,
				p_roi->y, p_roi->w, p_roi->h);

		*c_roi = *p_roi;	/* keep to ctrl */
		if (c_roi->w == 0 || c_roi->h == 0) {
			/* no new frame update */
			pr_debug("%s: ctrl=%d, no partial roi set\n",
						__func__, ctrl->ndx);
			if (!mdss_dsi_broadcast_mode_enabled())
				return 0;
		}

		roi = *c_roi;

		if (pinfo->partial_update_roi_merge)
			left_or_both = mdss_dsi_roi_merge(ctrl, &roi);

		if (pinfo->partial_update_dcs_cmd_by_left) {
			if (left_or_both && ctrl->ndx == DSI_CTRL_RIGHT) {
				/* 2A/2B sent by left already */
				return 0;
			}
		}

		/* big-endian start/end coordinates into the DCS payloads */
		caset[1] = (((roi.x) & 0xFF00) >> 8);
		caset[2] = (((roi.x) & 0xFF));
		caset[3] = (((roi.x - 1 + roi.w) & 0xFF00) >> 8);
		caset[4] = (((roi.x - 1 + roi.w) & 0xFF));
		set_col_page_addr_cmd[0].payload = caset;

		paset[1] = (((roi.y) & 0xFF00) >> 8);
		paset[2] = (((roi.y) & 0xFF));
		paset[3] = (((roi.y - 1 + roi.h) & 0xFF00) >> 8);
		paset[4] = (((roi.y - 1 + roi.h) & 0xFF));
		set_col_page_addr_cmd[1].payload = paset;

		memset(&cmdreq, 0, sizeof(cmdreq));
		cmdreq.cmds = set_col_page_addr_cmd;
		cmdreq.cmds_cnt = 2;
		cmdreq.flags = CMD_REQ_COMMIT | CMD_CLK_CTRL;
		cmdreq.rlen = 0;
		cmdreq.cb = NULL;

		if (pinfo->partial_update_dcs_cmd_by_left)
			ctrl = mdss_dsi_get_ctrl_by_index(DSI_CTRL_LEFT);

		mdss_dsi_cmdlist_put(ctrl, &cmdreq);
	}

	return 0;
}

/* Route a backlight level change to the configured backlight controller. */
static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
							u32 bl_level)
{
	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return;
	}

	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	/*
	 * Some backlight controllers specify a minimum duty cycle
	 * for the backlight brightness. If the brightness is less
	 * than it, the controller can malfunction.
	 */
	if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0))
		bl_level = pdata->panel_info.bl_min;

	switch (ctrl_pdata->bklt_ctrl) {
	case BL_WLED:
		led_trigger_event(bl_led_trigger, bl_level);
		break;
	case BL_PWM:
		mdss_dsi_panel_bklt_pwm(ctrl_pdata, bl_level);
		break;
	case BL_DCS_CMD:
		mdss_dsi_panel_bklt_dcs(ctrl_pdata, bl_level);
		/* in split-DSI, mirror the level to the slave controller */
		if (mdss_dsi_is_master_ctrl(ctrl_pdata)) {
			struct mdss_dsi_ctrl_pdata *sctrl =
				mdss_dsi_get_slave_ctrl();
			if (!sctrl) {
				pr_err("%s: Invalid slave ctrl data\n",
					__func__);
				return;
			}
			mdss_dsi_panel_bklt_dcs(sctrl, bl_level);
		}
		break;
	default:
		pr_err("%s: Unknown bl_ctrl configuration\n",
			__func__);
		break;
	}
}

/* Send the panel's DT-provided "on" command sequence and mark it unblanked. */
static int mdss_dsi_panel_on(struct mdss_panel_data *pdata)
{
	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
	struct mdss_panel_info *pinfo;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	pinfo = &pdata->panel_info;
	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pr_debug("%s: ctrl=%p ndx=%d\n", __func__, ctrl, ctrl->ndx);

	if (pinfo->partial_update_dcs_cmd_by_left) {
		if (ctrl->ndx != DSI_CTRL_LEFT)
			goto end;
	}

	if (ctrl->on_cmds.cmd_cnt)
		mdss_dsi_panel_cmds_send(ctrl, &ctrl->on_cmds);

end:
	pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
	pr_debug("%s:-\n", __func__);
	return 0;
}

/* Send the panel's DT-provided "off" command sequence and mark it blanked. */
static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
{
	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
	struct mdss_panel_info *pinfo;

	if (pdata == NULL) {
		pr_err("%s: Invalid input data\n", __func__);
		return -EINVAL;
	}

	pinfo = &pdata->panel_info;
	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
				panel_data);

	pr_debug("%s: ctrl=%p ndx=%d\n", __func__, ctrl, ctrl->ndx);

	if (pinfo->partial_update_dcs_cmd_by_left) {
		if (ctrl->ndx != DSI_CTRL_LEFT)
			goto end;
	}

	if (ctrl->off_cmds.cmd_cnt)
		mdss_dsi_panel_cmds_send(ctrl, &ctrl->off_cmds);

end:
	pinfo->blank_state = MDSS_PANEL_BLANK_BLANK;
	pr_debug("%s:-\n", __func__);
	return 0;
}

/* Track low-power blank state; no panel-specific LP commands are sent here. */
static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
	int enable)
{
struct mdss_dsi_ctrl_pdata *ctrl = NULL; struct mdss_panel_info *pinfo; if (pdata == NULL) { pr_err("%s: Invalid input data\n", __func__); return -EINVAL; } pinfo = &pdata->panel_info; ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata, panel_data); pr_debug("%s: ctrl=%p ndx=%d enable=%d\n", __func__, ctrl, ctrl->ndx, enable); /* Any panel specific low power commands/config */ if (enable) pinfo->blank_state = MDSS_PANEL_BLANK_LOW_POWER; else pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK; pr_debug("%s:-\n", __func__); return 0; } static void mdss_dsi_parse_lane_swap(struct device_node *np, char *dlane_swap) { const char *data; *dlane_swap = DSI_LANE_MAP_0123; data = of_get_property(np, "qcom,mdss-dsi-lane-map", NULL); if (data) { if (!strcmp(data, "lane_map_3012")) *dlane_swap = DSI_LANE_MAP_3012; else if (!strcmp(data, "lane_map_2301")) *dlane_swap = DSI_LANE_MAP_2301; else if (!strcmp(data, "lane_map_1230")) *dlane_swap = DSI_LANE_MAP_1230; else if (!strcmp(data, "lane_map_0321")) *dlane_swap = DSI_LANE_MAP_0321; else if (!strcmp(data, "lane_map_1032")) *dlane_swap = DSI_LANE_MAP_1032; else if (!strcmp(data, "lane_map_2103")) *dlane_swap = DSI_LANE_MAP_2103; else if (!strcmp(data, "lane_map_3210")) *dlane_swap = DSI_LANE_MAP_3210; } } static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger, char *trigger_key) { const char *data; *trigger = DSI_CMD_TRIGGER_SW; data = of_get_property(np, trigger_key, NULL); if (data) { if (!strcmp(data, "none")) *trigger = DSI_CMD_TRIGGER_NONE; else if (!strcmp(data, "trigger_te")) *trigger = DSI_CMD_TRIGGER_TE; else if (!strcmp(data, "trigger_sw_seof")) *trigger = DSI_CMD_TRIGGER_SW_SEOF; else if (!strcmp(data, "trigger_sw_te")) *trigger = DSI_CMD_TRIGGER_SW_TE; } } static int mdss_dsi_parse_dcs_cmds(struct device_node *np, struct dsi_panel_cmds *pcmds, char *cmd_key, char *link_key) { const char *data; int blen = 0, len; char *buf, *bp; struct dsi_ctrl_hdr *dchdr; int i, cnt; data = of_get_property(np, cmd_key, 
&blen); if (!data) { pr_err("%s: failed, key=%s\n", __func__, cmd_key); return -ENOMEM; } buf = kzalloc(sizeof(char) * blen, GFP_KERNEL); if (!buf) return -ENOMEM; memcpy(buf, data, blen); /* scan dcs commands */ bp = buf; len = blen; cnt = 0; while (len >= sizeof(*dchdr)) { dchdr = (struct dsi_ctrl_hdr *)bp; dchdr->dlen = ntohs(dchdr->dlen); if (dchdr->dlen > len) { pr_err("%s: dtsi cmd=%x error, len=%d", __func__, dchdr->dtype, dchdr->dlen); goto exit_free; } bp += sizeof(*dchdr); len -= sizeof(*dchdr); bp += dchdr->dlen; len -= dchdr->dlen; cnt++; } if (len != 0) { pr_err("%s: dcs_cmd=%x len=%d error!", __func__, buf[0], blen); goto exit_free; } pcmds->cmds = kzalloc(cnt * sizeof(struct dsi_cmd_desc), GFP_KERNEL); if (!pcmds->cmds) goto exit_free; pcmds->cmd_cnt = cnt; pcmds->buf = buf; pcmds->blen = blen; bp = buf; len = blen; for (i = 0; i < cnt; i++) { dchdr = (struct dsi_ctrl_hdr *)bp; len -= sizeof(*dchdr); bp += sizeof(*dchdr); pcmds->cmds[i].dchdr = *dchdr; pcmds->cmds[i].payload = bp; bp += dchdr->dlen; len -= dchdr->dlen; } data = of_get_property(np, link_key, NULL); if (data && !strcmp(data, "dsi_hs_mode")) pcmds->link_state = DSI_HS_MODE; else pcmds->link_state = DSI_LP_MODE; pr_debug("%s: dcs_cmd=%x len=%d, cmd_cnt=%d link_state=%d\n", __func__, pcmds->buf[0], pcmds->blen, pcmds->cmd_cnt, pcmds->link_state); return 0; exit_free: kfree(buf); return -ENOMEM; } static int mdss_panel_dt_get_dst_fmt(u32 bpp, char mipi_mode, u32 pixel_packing, char *dst_format) { int rc = 0; switch (bpp) { case 3: *dst_format = DSI_CMD_DST_FORMAT_RGB111; break; case 8: *dst_format = DSI_CMD_DST_FORMAT_RGB332; break; case 12: *dst_format = DSI_CMD_DST_FORMAT_RGB444; break; case 16: switch (mipi_mode) { case DSI_VIDEO_MODE: *dst_format = DSI_VIDEO_DST_FORMAT_RGB565; break; case DSI_CMD_MODE: *dst_format = DSI_CMD_DST_FORMAT_RGB565; break; default: *dst_format = DSI_VIDEO_DST_FORMAT_RGB565; break; } break; case 18: switch (mipi_mode) { case DSI_VIDEO_MODE: if (pixel_packing 
== 0) *dst_format = DSI_VIDEO_DST_FORMAT_RGB666; else *dst_format = DSI_VIDEO_DST_FORMAT_RGB666_LOOSE; break; case DSI_CMD_MODE: *dst_format = DSI_CMD_DST_FORMAT_RGB666; break; default: if (pixel_packing == 0) *dst_format = DSI_VIDEO_DST_FORMAT_RGB666; else *dst_format = DSI_VIDEO_DST_FORMAT_RGB666_LOOSE; break; } break; case 24: switch (mipi_mode) { case DSI_VIDEO_MODE: *dst_format = DSI_VIDEO_DST_FORMAT_RGB888; break; case DSI_CMD_MODE: *dst_format = DSI_CMD_DST_FORMAT_RGB888; break; default: *dst_format = DSI_VIDEO_DST_FORMAT_RGB888; break; } break; default: rc = -EINVAL; break; } return rc; } static int mdss_dsi_parse_fbc_params(struct device_node *np, struct mdss_panel_info *panel_info) { int rc, fbc_enabled = 0; u32 tmp; fbc_enabled = of_property_read_bool(np, "qcom,mdss-dsi-fbc-enable"); if (fbc_enabled) { pr_debug("%s:%d FBC panel enabled.\n", __func__, __LINE__); panel_info->fbc.enabled = 1; rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-bpp", &tmp); panel_info->fbc.target_bpp = (!rc ? tmp : panel_info->bpp); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-packing", &tmp); panel_info->fbc.comp_mode = (!rc ? tmp : 0); panel_info->fbc.qerr_enable = of_property_read_bool(np, "qcom,mdss-dsi-fbc-quant-error"); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-bias", &tmp); panel_info->fbc.cd_bias = (!rc ? tmp : 0); panel_info->fbc.pat_enable = of_property_read_bool(np, "qcom,mdss-dsi-fbc-pat-mode"); panel_info->fbc.vlc_enable = of_property_read_bool(np, "qcom,mdss-dsi-fbc-vlc-mode"); panel_info->fbc.bflc_enable = of_property_read_bool(np, "qcom,mdss-dsi-fbc-bflc-mode"); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-h-line-budget", &tmp); panel_info->fbc.line_x_budget = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-budget-ctrl", &tmp); panel_info->fbc.block_x_budget = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-block-budget", &tmp); panel_info->fbc.block_budget = (!rc ? 
tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-lossless-threshold", &tmp); panel_info->fbc.lossless_mode_thd = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-lossy-threshold", &tmp); panel_info->fbc.lossy_mode_thd = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-rgb-threshold", &tmp); panel_info->fbc.lossy_rgb_thd = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-fbc-lossy-mode-idx", &tmp); panel_info->fbc.lossy_mode_idx = (!rc ? tmp : 0); } else { pr_debug("%s:%d Panel does not support FBC.\n", __func__, __LINE__); panel_info->fbc.enabled = 0; panel_info->fbc.target_bpp = panel_info->bpp; } return 0; } static void mdss_panel_parse_te_params(struct device_node *np, struct mdss_panel_info *panel_info) { u32 tmp; int rc = 0; /* * TE default: dsi byte clock calculated base on 70 fps; * around 14 ms to complete a kickoff cycle if te disabled; * vclk_line base on 60 fps; write is faster than read; * init == start == rdptr; */ panel_info->te.tear_check_en = !of_property_read_bool(np, "qcom,mdss-tear-check-disable"); rc = of_property_read_u32 (np, "qcom,mdss-tear-check-sync-cfg-height", &tmp); panel_info->te.sync_cfg_height = (!rc ? tmp : 0xfff0); rc = of_property_read_u32 (np, "qcom,mdss-tear-check-sync-init-val", &tmp); panel_info->te.vsync_init_val = (!rc ? tmp : panel_info->yres); rc = of_property_read_u32 (np, "qcom,mdss-tear-check-sync-threshold-start", &tmp); panel_info->te.sync_threshold_start = (!rc ? tmp : 4); rc = of_property_read_u32 (np, "qcom,mdss-tear-check-sync-threshold-continue", &tmp); panel_info->te.sync_threshold_continue = (!rc ? tmp : 4); rc = of_property_read_u32(np, "qcom,mdss-tear-check-start-pos", &tmp); panel_info->te.start_pos = (!rc ? tmp : panel_info->yres); rc = of_property_read_u32 (np, "qcom,mdss-tear-check-rd-ptr-trigger-intr", &tmp); panel_info->te.rd_ptr_irq = (!rc ? 
tmp : panel_info->yres + 1); rc = of_property_read_u32(np, "qcom,mdss-tear-check-frame-rate", &tmp); panel_info->te.refx100 = (!rc ? tmp : 6000); } static int mdss_dsi_parse_reset_seq(struct device_node *np, u32 rst_seq[MDSS_DSI_RST_SEQ_LEN], u32 *rst_len, const char *name) { int num = 0, i; int rc; struct property *data; u32 tmp[MDSS_DSI_RST_SEQ_LEN]; *rst_len = 0; data = of_find_property(np, name, &num); num /= sizeof(u32); if (!data || !num || num > MDSS_DSI_RST_SEQ_LEN || num % 2) { pr_debug("%s:%d, error reading %s, length found = %d\n", __func__, __LINE__, name, num); } else { rc = of_property_read_u32_array(np, name, tmp, num); if (rc) pr_debug("%s:%d, error reading %s, rc = %d\n", __func__, __LINE__, name, rc); else { for (i = 0; i < num; ++i) rst_seq[i] = tmp[i]; *rst_len = num; } } return 0; } static void mdss_dsi_parse_roi_alignment(struct device_node *np, struct mdss_panel_info *pinfo) { int len = 0; u32 value[6]; struct property *data; data = of_find_property(np, "qcom,panel-roi-alignment", &len); len /= sizeof(u32); if (!data || (len != 6)) { pr_debug("%s: Panel roi alignment not found", __func__); } else { int rc = of_property_read_u32_array(np, "qcom,panel-roi-alignment", value, len); if (rc) pr_debug("%s: Error reading panel roi alignment values", __func__); else { pinfo->xstart_pix_align = value[0]; pinfo->width_pix_align = value[1]; pinfo->ystart_pix_align = value[2]; pinfo->height_pix_align = value[3]; pinfo->min_width = value[4]; pinfo->min_height = value[5]; } pr_debug("%s: ROI alignment: [%d, %d, %d, %d, %d, %d]", __func__, pinfo->xstart_pix_align, pinfo->width_pix_align, pinfo->ystart_pix_align, pinfo->height_pix_align, pinfo->min_width, pinfo->min_height); } } static int mdss_dsi_parse_panel_features(struct device_node *np, struct mdss_dsi_ctrl_pdata *ctrl) { struct mdss_panel_info *pinfo; if (!np || !ctrl) { pr_err("%s: Invalid arguments\n", __func__); return -ENODEV; } pinfo = &ctrl->panel_data.panel_info; pinfo->cont_splash_enabled = 
of_property_read_bool(np, "qcom,cont-splash-enabled"); if (pinfo->mipi.mode == DSI_CMD_MODE) { pinfo->partial_update_enabled = of_property_read_bool(np, "qcom,partial-update-enabled"); pr_info("%s: partial_update_enabled=%d\n", __func__, pinfo->partial_update_enabled); if (pinfo->partial_update_enabled) { ctrl->set_col_page_addr = mdss_dsi_set_col_page_addr; pinfo->partial_update_dcs_cmd_by_left = of_property_read_bool(np, "qcom,partial-update-dcs-cmd-by-left"); pinfo->partial_update_roi_merge = of_property_read_bool(np, "qcom,partial-update-roi-merge"); } } pinfo->ulps_feature_enabled = of_property_read_bool(np, "qcom,ulps-enabled"); pr_info("%s: ulps feature %s", __func__, (pinfo->ulps_feature_enabled ? "enabled" : "disabled")); return 0; } static void mdss_dsi_parse_panel_horizintal_line_idle(struct device_node *np, struct mdss_dsi_ctrl_pdata *ctrl) { const u32 *src; int i, len, cnt; struct panel_horizontal_idle *kp; if (!np || !ctrl) { pr_err("%s: Invalid arguments\n", __func__); return; } src = of_get_property(np, "qcom,mdss-dsi-hor-line-idle", &len); if (!src || len == 0) return; cnt = len % 3; /* 3 fields per entry */ if (cnt) { pr_err("%s: invalid horizontal idle len=%d\n", __func__, len); return; } cnt = len / sizeof(u32); kp = kzalloc(sizeof(*kp) * (cnt / 3), GFP_KERNEL); if (kp == NULL) { pr_err("%s: No memory\n", __func__); return; } ctrl->line_idle = kp; for (i = 0; i < cnt; i += 3) { kp->min = be32_to_cpu(src[i]); kp->max = be32_to_cpu(src[i+1]); kp->idle = be32_to_cpu(src[i+2]); kp++; ctrl->horizontal_idle_cnt++; } pr_debug("%s: horizontal_idle_cnt=%d\n", __func__, ctrl->horizontal_idle_cnt); } static int mdss_panel_parse_dt(struct device_node *np, struct mdss_dsi_ctrl_pdata *ctrl_pdata) { u32 tmp; int rc, i, len; const char *data; static const char *pdest; struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info); rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-width", &tmp); if (rc) { pr_err("%s:%d, panel width not specified\n", 
__func__, __LINE__); return -EINVAL; } pinfo->xres = (!rc ? tmp : 640); rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-height", &tmp); if (rc) { pr_err("%s:%d, panel height not specified\n", __func__, __LINE__); return -EINVAL; } pinfo->yres = (!rc ? tmp : 480); rc = of_property_read_u32(np, "qcom,mdss-pan-physical-width-dimension", &tmp); pinfo->physical_width = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-pan-physical-height-dimension", &tmp); pinfo->physical_height = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-h-left-border", &tmp); pinfo->lcdc.xres_pad = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-h-right-border", &tmp); if (!rc) pinfo->lcdc.xres_pad += tmp; rc = of_property_read_u32(np, "qcom,mdss-dsi-v-top-border", &tmp); pinfo->lcdc.yres_pad = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-v-bottom-border", &tmp); if (!rc) pinfo->lcdc.yres_pad += tmp; rc = of_property_read_u32(np, "qcom,mdss-dsi-bpp", &tmp); if (rc) { pr_err("%s:%d, bpp not specified\n", __func__, __LINE__); return -EINVAL; } pinfo->bpp = (!rc ? tmp : 24); pinfo->mipi.mode = DSI_VIDEO_MODE; data = of_get_property(np, "qcom,mdss-dsi-panel-type", NULL); if (data && !strncmp(data, "dsi_cmd_mode", 12)) pinfo->mipi.mode = DSI_CMD_MODE; tmp = 0; data = of_get_property(np, "qcom,mdss-dsi-pixel-packing", NULL); if (data && !strcmp(data, "loose")) tmp = 1; rc = mdss_panel_dt_get_dst_fmt(pinfo->bpp, pinfo->mipi.mode, tmp, &(pinfo->mipi.dst_format)); if (rc) { pr_debug("%s: problem determining dst format. Set Default\n", __func__); pinfo->mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888; } pdest = of_get_property(np, "qcom,mdss-dsi-panel-destination", NULL); if (pdest) { if (strlen(pdest) != 9) { pr_err("%s: Unknown pdest specified\n", __func__); return -EINVAL; } if (!strcmp(pdest, "display_1")) pinfo->pdest = DISPLAY_1; else if (!strcmp(pdest, "display_2")) pinfo->pdest = DISPLAY_2; else { pr_debug("%s: incorrect pdest. 
Set Default\n", __func__); pinfo->pdest = DISPLAY_1; } } else { pr_debug("%s: pdest not specified. Set Default\n", __func__); pinfo->pdest = DISPLAY_1; } rc = of_property_read_u32(np, "qcom,mdss-dsi-h-front-porch", &tmp); pinfo->lcdc.h_front_porch = (!rc ? tmp : 6); rc = of_property_read_u32(np, "qcom,mdss-dsi-h-back-porch", &tmp); pinfo->lcdc.h_back_porch = (!rc ? tmp : 6); rc = of_property_read_u32(np, "qcom,mdss-dsi-h-pulse-width", &tmp); pinfo->lcdc.h_pulse_width = (!rc ? tmp : 2); rc = of_property_read_u32(np, "qcom,mdss-dsi-h-sync-skew", &tmp); pinfo->lcdc.hsync_skew = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-v-back-porch", &tmp); pinfo->lcdc.v_back_porch = (!rc ? tmp : 6); rc = of_property_read_u32(np, "qcom,mdss-dsi-v-front-porch", &tmp); pinfo->lcdc.v_front_porch = (!rc ? tmp : 6); rc = of_property_read_u32(np, "qcom,mdss-dsi-v-pulse-width", &tmp); pinfo->lcdc.v_pulse_width = (!rc ? tmp : 2); rc = of_property_read_u32(np, "qcom,mdss-dsi-underflow-color", &tmp); pinfo->lcdc.underflow_clr = (!rc ? tmp : 0xff); rc = of_property_read_u32(np, "qcom,mdss-dsi-border-color", &tmp); pinfo->lcdc.border_clr = (!rc ? 
tmp : 0); pinfo->bklt_ctrl = UNKNOWN_CTRL; data = of_get_property(np, "qcom,mdss-dsi-bl-pmic-control-type", NULL); if (data) { if (!strncmp(data, "bl_ctrl_wled", 12)) { led_trigger_register_simple("bkl-trigger", &bl_led_trigger); pr_debug("%s: SUCCESS-> WLED TRIGGER register\n", __func__); ctrl_pdata->bklt_ctrl = BL_WLED; } else if (!strncmp(data, "bl_ctrl_pwm", 11)) { ctrl_pdata->bklt_ctrl = BL_PWM; rc = of_property_read_u32(np, "qcom,mdss-dsi-bl-pmic-pwm-frequency", &tmp); if (rc) { pr_err("%s:%d, Error, panel pwm_period\n", __func__, __LINE__); return -EINVAL; } ctrl_pdata->pwm_period = tmp; rc = of_property_read_u32(np, "qcom,mdss-dsi-bl-pmic-bank-select", &tmp); if (rc) { pr_err("%s:%d, Error, dsi lpg channel\n", __func__, __LINE__); return -EINVAL; } ctrl_pdata->pwm_lpg_chan = tmp; tmp = of_get_named_gpio(np, "qcom,mdss-dsi-pwm-gpio", 0); ctrl_pdata->pwm_pmic_gpio = tmp; } else if (!strncmp(data, "bl_ctrl_dcs", 11)) { ctrl_pdata->bklt_ctrl = BL_DCS_CMD; } } rc = of_property_read_u32(np, "qcom,mdss-brightness-max-level", &tmp); pinfo->brightness_max = (!rc ? tmp : MDSS_MAX_BL_BRIGHTNESS); rc = of_property_read_u32(np, "qcom,mdss-dsi-bl-min-level", &tmp); pinfo->bl_min = (!rc ? tmp : 0); rc = of_property_read_u32(np, "qcom,mdss-dsi-bl-max-level", &tmp); pinfo->bl_max = (!rc ? tmp : 255); ctrl_pdata->bklt_max = pinfo->bl_max; rc = of_property_read_u32(np, "qcom,mdss-dsi-interleave-mode", &tmp); pinfo->mipi.interleave_mode = (!rc ? tmp : 0); pinfo->mipi.vsync_enable = of_property_read_bool(np, "qcom,mdss-dsi-te-check-enable"); pinfo->mipi.hw_vsync_mode = of_property_read_bool(np, "qcom,mdss-dsi-te-using-te-pin"); rc = of_property_read_u32(np, "qcom,mdss-dsi-h-sync-pulse", &tmp); pinfo->mipi.pulse_mode_hsa_he = (!rc ? 
tmp : false); pinfo->mipi.hfp_power_stop = of_property_read_bool(np, "qcom,mdss-dsi-hfp-power-mode"); pinfo->mipi.hsa_power_stop = of_property_read_bool(np, "qcom,mdss-dsi-hsa-power-mode"); pinfo->mipi.hbp_power_stop = of_property_read_bool(np, "qcom,mdss-dsi-hbp-power-mode"); pinfo->mipi.last_line_interleave_en = of_property_read_bool(np, "qcom,mdss-dsi-last-line-interleave"); pinfo->mipi.bllp_power_stop = of_property_read_bool(np, "qcom,mdss-dsi-bllp-power-mode"); pinfo->mipi.eof_bllp_power_stop = of_property_read_bool( np, "qcom,mdss-dsi-bllp-eof-power-mode"); pinfo->mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE; data = of_get_property(np, "qcom,mdss-dsi-traffic-mode", NULL); if (data) { if (!strcmp(data, "non_burst_sync_event")) pinfo->mipi.traffic_mode = DSI_NON_BURST_SYNCH_EVENT; else if (!strcmp(data, "burst_mode")) pinfo->mipi.traffic_mode = DSI_BURST_MODE; } rc = of_property_read_u32(np, "qcom,mdss-dsi-te-dcs-command", &tmp); pinfo->mipi.insert_dcs_cmd = (!rc ? tmp : 1); rc = of_property_read_u32(np, "qcom,mdss-dsi-wr-mem-continue", &tmp); pinfo->mipi.wr_mem_continue = (!rc ? tmp : 0x3c); rc = of_property_read_u32(np, "qcom,mdss-dsi-wr-mem-start", &tmp); pinfo->mipi.wr_mem_start = (!rc ? tmp : 0x2c); rc = of_property_read_u32(np, "qcom,mdss-dsi-te-pin-select", &tmp); pinfo->mipi.te_sel = (!rc ? tmp : 1); rc = of_property_read_u32(np, "qcom,mdss-dsi-virtual-channel-id", &tmp); pinfo->mipi.vc = (!rc ? 
tmp : 0); pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RGB; data = of_get_property(np, "qcom,mdss-dsi-color-order", NULL); if (data) { if (!strcmp(data, "rgb_swap_rbg")) pinfo->mipi.rgb_swap = DSI_RGB_SWAP_RBG; else if (!strcmp(data, "rgb_swap_bgr")) pinfo->mipi.rgb_swap = DSI_RGB_SWAP_BGR; else if (!strcmp(data, "rgb_swap_brg")) pinfo->mipi.rgb_swap = DSI_RGB_SWAP_BRG; else if (!strcmp(data, "rgb_swap_grb")) pinfo->mipi.rgb_swap = DSI_RGB_SWAP_GRB; else if (!strcmp(data, "rgb_swap_gbr")) pinfo->mipi.rgb_swap = DSI_RGB_SWAP_GBR; } pinfo->mipi.data_lane0 = of_property_read_bool(np, "qcom,mdss-dsi-lane-0-state"); pinfo->mipi.data_lane1 = of_property_read_bool(np, "qcom,mdss-dsi-lane-1-state"); pinfo->mipi.data_lane2 = of_property_read_bool(np, "qcom,mdss-dsi-lane-2-state"); pinfo->mipi.data_lane3 = of_property_read_bool(np, "qcom,mdss-dsi-lane-3-state"); rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-pre", &tmp); pinfo->mipi.t_clk_pre = (!rc ? tmp : 0x24); rc = of_property_read_u32(np, "qcom,mdss-dsi-t-clk-post", &tmp); pinfo->mipi.t_clk_post = (!rc ? tmp : 0x03); pinfo->mipi.rx_eot_ignore = of_property_read_bool(np, "qcom,mdss-dsi-rx-eot-ignore"); pinfo->mipi.tx_eot_append = of_property_read_bool(np, "qcom,mdss-dsi-tx-eot-append"); rc = of_property_read_u32(np, "qcom,mdss-dsi-stream", &tmp); pinfo->mipi.stream = (!rc ? tmp : 0); data = of_get_property(np, "qcom,mdss-dsi-panel-mode-gpio-state", NULL); if (data) { if (!strcmp(data, "high")) pinfo->mode_gpio_state = MODE_GPIO_HIGH; else if (!strcmp(data, "low")) pinfo->mode_gpio_state = MODE_GPIO_LOW; } else { pinfo->mode_gpio_state = MODE_GPIO_NOT_VALID; } rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-framerate", &tmp); pinfo->mipi.frame_rate = (!rc ? tmp : 60); rc = of_property_read_u32(np, "qcom,mdss-dsi-panel-clockrate", &tmp); pinfo->clk_rate = (!rc ? 
tmp : 0); data = of_get_property(np, "qcom,mdss-dsi-panel-timings", &len); if ((!data) || (len != 12)) { pr_err("%s:%d, Unable to read Phy timing settings", __func__, __LINE__); goto error; } for (i = 0; i < len; i++) pinfo->mipi.dsi_phy_db.timing[i] = data[i]; pinfo->mipi.lp11_init = of_property_read_bool(np, "qcom,mdss-dsi-lp11-init"); rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp); pinfo->mipi.init_delay = (!rc ? tmp : 0); mdss_dsi_parse_roi_alignment(np, pinfo); mdss_dsi_parse_trigger(np, &(pinfo->mipi.mdp_trigger), "qcom,mdss-dsi-mdp-trigger"); mdss_dsi_parse_trigger(np, &(pinfo->mipi.dma_trigger), "qcom,mdss-dsi-dma-trigger"); mdss_dsi_parse_lane_swap(np, &(pinfo->mipi.dlane_swap)); mdss_dsi_parse_fbc_params(np, pinfo); mdss_panel_parse_te_params(np, pinfo); mdss_dsi_parse_reset_seq(np, pinfo->rst_seq, &(pinfo->rst_seq_len), "qcom,mdss-dsi-reset-sequence"); mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->on_cmds, "qcom,mdss-dsi-on-command", "qcom,mdss-dsi-on-command-state"); mdss_dsi_parse_dcs_cmds(np, &ctrl_pdata->off_cmds, "qcom,mdss-dsi-off-command", "qcom,mdss-dsi-off-command-state"); rc = mdss_dsi_parse_panel_features(np, ctrl_pdata); if (rc) { pr_err("%s: failed to parse panel features\n", __func__); goto error; } mdss_dsi_parse_panel_horizintal_line_idle(np, ctrl_pdata); return 0; error: return -EINVAL; } int mdss_dsi_panel_init(struct device_node *node, struct mdss_dsi_ctrl_pdata *ctrl_pdata, bool cmd_cfg_cont_splash) { int rc = 0; static const char *panel_name; struct mdss_panel_info *pinfo; if (!node || !ctrl_pdata) { pr_err("%s: Invalid arguments\n", __func__); return -ENODEV; } pinfo = &ctrl_pdata->panel_data.panel_info; pr_debug("%s:%d\n", __func__, __LINE__); panel_name = of_get_property(node, "qcom,mdss-dsi-panel-name", NULL); if (!panel_name) pr_info("%s:%d, Panel name not specified\n", __func__, __LINE__); else pr_info("%s: Panel Name = %s\n", __func__, panel_name); rc = mdss_panel_parse_dt(node, ctrl_pdata); if (rc) { 
pr_err("%s:%d panel dt parse failed\n", __func__, __LINE__); return rc; } if (!cmd_cfg_cont_splash) pinfo->cont_splash_enabled = false; pr_info("%s: Continuous splash %s", __func__, pinfo->cont_splash_enabled ? "enabled" : "disabled"); ctrl_pdata->on = mdss_dsi_panel_on; ctrl_pdata->off = mdss_dsi_panel_off; ctrl_pdata->low_power_config = mdss_dsi_panel_low_power_config; ctrl_pdata->panel_data.set_backlight = mdss_dsi_panel_bl_ctrl; return 0; }
gpl-2.0
mathias-baumann-sociomantic/libgcrypt
mpi/mpi-gcd.c
19
1529
/* mpi-gcd.c  -  MPI functions
 * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include "mpi-internal.h"

/****************
 * Store the greatest common divisor of XA and XB into G.
 * G may alias neither input's working copy; it is also used as
 * scratch space for the intermediate remainders.
 * Return: nonzero (true) if the resulting GCD equals 1,
 *         zero (false) in all other cases.
 */
int
_gcry_mpi_gcd (gcry_mpi_t g, gcry_mpi_t xa, gcry_mpi_t xb)
{
  gcry_mpi_t u, v;

  /* Work on private copies so the caller's operands are untouched.  */
  u = mpi_copy (xa);
  v = mpi_copy (xb);

  /* Euclid's algorithm on absolute values
     (TAOCP Vol II, 4.5.2, Algorithm A).  */
  u->sign = 0;
  v->sign = 0;

  while (mpi_cmp_ui (v, 0))
    {
      /* G doubles as a temporary holding u mod v.  */
      _gcry_mpi_fdiv_r (g, u, v);
      mpi_set (u, v);
      mpi_set (v, g);
    }

  mpi_set (g, u);

  mpi_free (u);
  mpi_free (v);

  return !mpi_cmp_ui (g, 1);
}
gpl-2.0
kavishme/kvm
arch/arm/mach-tegra/hotplug.c
275
1840
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 * Copyright (c) 2010, 2012-2013, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk/tegra.h>
#include <linux/kernel.h>
#include <linux/smp.h>

#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>

#include <asm/smp_plat.h>

#include "sleep.h"

/*
 * Chip-specific CPU shutdown routine, selected once at boot by
 * tegra_hotplug_init() below.  NULL until that initcall has run.
 */
static void (*tegra_hotplug_shutdown)(void);

/*
 * Finish taking a CPU offline from the surviving CPU's side:
 * wait for the dying CPU to reach reset, then gate its clock.
 *
 * NOTE(review): the unconditional "return 1" appears to signal success
 * to the generic hotplug core -- confirm against the smp_operations
 * .cpu_kill contract.
 */
int tegra_cpu_kill(unsigned cpu)
{
	/* Translate the logical CPU number to the physical (MPIDR) id. */
	cpu = cpu_logical_map(cpu);

	/* Clock gate the CPU */
	tegra_wait_cpu_in_reset(cpu);
	tegra_disable_cpu_clock(cpu);

	return 1;
}

/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void tegra_cpu_die(unsigned int cpu)
{
	/*
	 * Bail out (with a one-time warning) if the initcall has not yet
	 * installed a chip-specific shutdown handler; dying without one
	 * would jump through a NULL pointer.
	 */
	if (!tegra_hotplug_shutdown) {
		WARN(1, "hotplug is not yet initialized\n");
		return;
	}

	/* Clean L1 data cache */
	tegra_disable_clean_inv_dcache(TEGRA_FLUSH_CACHE_LOUIS);

	/* Shut down the current CPU. */
	tegra_hotplug_shutdown();

	/* Should never return here. */
	BUG();
}

/*
 * Pick the shutdown routine matching the SoC we are running on.
 * Tegra20 has its own path; Tegra30, 114 and 124 all share the
 * Tegra30 routine.  The IS_ENABLED() guards let the compiler drop
 * references to routines that are not built in to this kernel.
 */
static int __init tegra_hotplug_init(void)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return 0;

	if (!soc_is_tegra())
		return 0;

	if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_get_chip_id() == TEGRA20)
		tegra_hotplug_shutdown = tegra20_hotplug_shutdown;
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) && tegra_get_chip_id() == TEGRA30)
		tegra_hotplug_shutdown = tegra30_hotplug_shutdown;
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) && tegra_get_chip_id() == TEGRA114)
		tegra_hotplug_shutdown = tegra30_hotplug_shutdown;
	if (IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) && tegra_get_chip_id() == TEGRA124)
		tegra_hotplug_shutdown = tegra30_hotplug_shutdown;

	return 0;
}
/* Runs early (before device initcalls) so the handler is ready
 * by the time any CPU can be hotplugged. */
pure_initcall(tegra_hotplug_init);
gpl-2.0
AntonGitName/au-linux-kernel-spring-2016
linux/arch/x86/kernel/vm86_32.c
275
23308
/* * Copyright (C) 1994 Linus Torvalds * * 29 dec 2001 - Fixed oopses caused by unchecked access to the vm86 * stack - Manfred Spraul <manfred@colorfullife.com> * * 22 mar 2002 - Manfred detected the stackfaults, but didn't handle * them correctly. Now the emulation will be in a * consistent state after stackfaults - Kasper Dupont * <kasperd@daimi.au.dk> * * 22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont * <kasperd@daimi.au.dk> * * ?? ??? 2002 - Fixed premature returns from handle_vm86_fault * caused by Kasper Dupont's changes - Stas Sergeev * * 4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes. * Kasper Dupont <kasperd@daimi.au.dk> * * 9 apr 2002 - Changed syntax of macros in handle_vm86_fault. * Kasper Dupont <kasperd@daimi.au.dk> * * 9 apr 2002 - Changed stack access macros to jump to a label * instead of returning to userspace. This simplifies * do_int, and is needed by handle_vm6_fault. Kasper * Dupont <kasperd@daimi.au.dk> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/capability.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/audit.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/security.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/vm86.h> /* * Known problems: * * Interrupt handling is not guaranteed: * - a real x86 will disable all interrupts for one instruction * after a "mov ss,xx" to make stack handling atomic even without * the 'lss' instruction. We can't guarantee this in v86 mode, * as the next instruction might result in a page fault or similar. * - a real x86 will have interrupts disabled for one instruction * past the 'sti' that enables them. 
We don't bother with all the * details yet. * * Let's hope these problems do not actually matter for anything. */ /* * 8- and 16-bit register defines.. */ #define AL(regs) (((unsigned char *)&((regs)->pt.ax))[0]) #define AH(regs) (((unsigned char *)&((regs)->pt.ax))[1]) #define IP(regs) (*(unsigned short *)&((regs)->pt.ip)) #define SP(regs) (*(unsigned short *)&((regs)->pt.sp)) /* * virtual flags (16 and 32-bit versions) */ #define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags)) #define VEFLAGS (current->thread.vm86->veflags) #define set_flags(X, new, mask) \ ((X) = ((X) & ~(mask)) | ((new) & (mask))) #define SAFE_MASK (0xDD5) #define RETURN_MASK (0xDFF) void save_v86_state(struct kernel_vm86_regs *regs, int retval) { struct tss_struct *tss; struct task_struct *tsk = current; struct vm86plus_struct __user *user; struct vm86 *vm86 = current->thread.vm86; long err = 0; /* * This gets called from entry.S with interrupts disabled, but * from process context. Enable interrupts here, before trying * to access user space. */ local_irq_enable(); if (!vm86 || !vm86->user_vm86) { pr_alert("no user_vm86: BAD\n"); do_exit(SIGSEGV); } set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask); user = vm86->user_vm86; if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ? 
sizeof(struct vm86plus_struct) : sizeof(struct vm86_struct))) { pr_alert("could not access userspace vm86 info\n"); do_exit(SIGSEGV); } put_user_try { put_user_ex(regs->pt.bx, &user->regs.ebx); put_user_ex(regs->pt.cx, &user->regs.ecx); put_user_ex(regs->pt.dx, &user->regs.edx); put_user_ex(regs->pt.si, &user->regs.esi); put_user_ex(regs->pt.di, &user->regs.edi); put_user_ex(regs->pt.bp, &user->regs.ebp); put_user_ex(regs->pt.ax, &user->regs.eax); put_user_ex(regs->pt.ip, &user->regs.eip); put_user_ex(regs->pt.cs, &user->regs.cs); put_user_ex(regs->pt.flags, &user->regs.eflags); put_user_ex(regs->pt.sp, &user->regs.esp); put_user_ex(regs->pt.ss, &user->regs.ss); put_user_ex(regs->es, &user->regs.es); put_user_ex(regs->ds, &user->regs.ds); put_user_ex(regs->fs, &user->regs.fs); put_user_ex(regs->gs, &user->regs.gs); put_user_ex(vm86->screen_bitmap, &user->screen_bitmap); } put_user_catch(err); if (err) { pr_alert("could not access userspace vm86 info\n"); do_exit(SIGSEGV); } tss = &per_cpu(cpu_tss, get_cpu()); tsk->thread.sp0 = vm86->saved_sp0; tsk->thread.sysenter_cs = __KERNEL_CS; load_sp0(tss, &tsk->thread); vm86->saved_sp0 = 0; put_cpu(); memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs)); lazy_load_gs(vm86->regs32.gs); regs->pt.ax = retval; } static void mark_screen_rdonly(struct mm_struct *mm) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; int i; down_write(&mm->mmap_sem); pgd = pgd_offset(mm, 0xA0000); if (pgd_none_or_clear_bad(pgd)) goto out; pud = pud_offset(pgd, 0xA0000); if (pud_none_or_clear_bad(pud)) goto out; pmd = pmd_offset(pud, 0xA0000); split_huge_page_pmd_mm(mm, 0xA0000, pmd); if (pmd_none_or_clear_bad(pmd)) goto out; pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); for (i = 0; i < 32; i++) { if (pte_present(*pte)) set_pte(pte, pte_wrprotect(*pte)); pte++; } pte_unmap_unlock(pte, ptl); out: up_write(&mm->mmap_sem); flush_tlb(); } static int do_vm86_irq_handling(int subfunction, int irqnumber); static long 
do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus); SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86) { return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false); } SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) { switch (cmd) { case VM86_REQUEST_IRQ: case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: return do_vm86_irq_handling(cmd, (int)arg); case VM86_PLUS_INSTALL_CHECK: /* * NOTE: on old vm86 stuff this will return the error * from access_ok(), because the subfunction is * interpreted as (invalid) address to vm86_struct. * So the installation check works. */ return 0; } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ return do_sys_vm86((struct vm86plus_struct __user *) arg, true); } static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) { struct tss_struct *tss; struct task_struct *tsk = current; struct vm86 *vm86 = tsk->thread.vm86; struct kernel_vm86_regs vm86regs; struct pt_regs *regs = current_pt_regs(); unsigned long err = 0; err = security_mmap_addr(0); if (err) { /* * vm86 cannot virtualize the address space, so vm86 users * need to manage the low 1MB themselves using mmap. Given * that BIOS places important data in the first page, vm86 * is essentially useless if mmap_min_addr != 0. DOSEMU, * for example, won't even bother trying to use vm86 if it * can't map a page at virtual address 0. * * To reduce the available kernel attack surface, simply * disallow vm86(old) for users who cannot mmap at va 0. * * The implementation of security_mmap_addr will allow * suitably privileged users to map va 0 even if * vm.mmap_min_addr is set above 0, and we want this * behavior for vm86 as well, as it ensures that legacy * tools like vbetool will not fail just because of * vm.mmap_min_addr. */ pr_info_once("Denied a call to vm86(old) from %s[%d] (uid: %d). 
Set the vm.mmap_min_addr sysctl to 0 and/or adjust LSM mmap_min_addr policy to enable vm86 if you are using a vm86-based DOS emulator.\n", current->comm, task_pid_nr(current), from_kuid_munged(&init_user_ns, current_uid())); return -EPERM; } if (!vm86) { if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL))) return -ENOMEM; tsk->thread.vm86 = vm86; } if (vm86->saved_sp0) return -EPERM; if (!access_ok(VERIFY_READ, user_vm86, plus ? sizeof(struct vm86_struct) : sizeof(struct vm86plus_struct))) return -EFAULT; memset(&vm86regs, 0, sizeof(vm86regs)); get_user_try { unsigned short seg; get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx); get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx); get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx); get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi); get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi); get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp); get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax); get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip); get_user_ex(seg, &user_vm86->regs.cs); vm86regs.pt.cs = seg; get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags); get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp); get_user_ex(seg, &user_vm86->regs.ss); vm86regs.pt.ss = seg; get_user_ex(vm86regs.es, &user_vm86->regs.es); get_user_ex(vm86regs.ds, &user_vm86->regs.ds); get_user_ex(vm86regs.fs, &user_vm86->regs.fs); get_user_ex(vm86regs.gs, &user_vm86->regs.gs); get_user_ex(vm86->flags, &user_vm86->flags); get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap); get_user_ex(vm86->cpu_type, &user_vm86->cpu_type); } get_user_catch(err); if (err) return err; if (copy_from_user(&vm86->int_revectored, &user_vm86->int_revectored, sizeof(struct revectored_struct))) return -EFAULT; if (copy_from_user(&vm86->int21_revectored, &user_vm86->int21_revectored, sizeof(struct revectored_struct))) return -EFAULT; if (plus) { if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus, sizeof(struct vm86plus_info_struct))) return -EFAULT; 
vm86->vm86plus.is_vm86pus = 1; } else memset(&vm86->vm86plus, 0, sizeof(struct vm86plus_info_struct)); memcpy(&vm86->regs32, regs, sizeof(struct pt_regs)); vm86->user_vm86 = user_vm86; /* * The flags register is also special: we cannot trust that the user * has set it up safely, so this makes sure interrupt etc flags are * inherited from protected mode. */ VEFLAGS = vm86regs.pt.flags; vm86regs.pt.flags &= SAFE_MASK; vm86regs.pt.flags |= regs->flags & ~SAFE_MASK; vm86regs.pt.flags |= X86_VM_MASK; vm86regs.pt.orig_ax = regs->orig_ax; switch (vm86->cpu_type) { case CPU_286: vm86->veflags_mask = 0; break; case CPU_386: vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; case CPU_486: vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; default: vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; break; } /* * Save old state */ vm86->saved_sp0 = tsk->thread.sp0; lazy_save_gs(vm86->regs32.gs); tss = &per_cpu(cpu_tss, get_cpu()); /* make room for real-mode segments */ tsk->thread.sp0 += 16; if (cpu_has_sep) tsk->thread.sysenter_cs = 0; load_sp0(tss, &tsk->thread); put_cpu(); if (vm86->flags & VM86_SCREEN_BITMAP) mark_screen_rdonly(tsk->mm); memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs)); force_iret(); return regs->ax; } static inline void set_IF(struct kernel_vm86_regs *regs) { VEFLAGS |= X86_EFLAGS_VIF; } static inline void clear_IF(struct kernel_vm86_regs *regs) { VEFLAGS &= ~X86_EFLAGS_VIF; } static inline void clear_TF(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~X86_EFLAGS_TF; } static inline void clear_AC(struct kernel_vm86_regs *regs) { regs->pt.flags &= ~X86_EFLAGS_AC; } /* * It is correct to call set_IF(regs) from the set_vflags_* * functions. However someone forgot to call clear_IF(regs) * in the opposite case. * After the command sequence CLI PUSHF STI POPF you should * end up with interrupts disabled, but you ended up with * interrupts enabled. 
* ( I was testing my own changes, but the only bug I * could find was in a function I had not changed. ) * [KD] */ static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs) { set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); else clear_IF(regs); } static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs) { set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask); set_flags(regs->pt.flags, flags, SAFE_MASK); if (flags & X86_EFLAGS_IF) set_IF(regs); else clear_IF(regs); } static inline unsigned long get_vflags(struct kernel_vm86_regs *regs) { unsigned long flags = regs->pt.flags & RETURN_MASK; if (VEFLAGS & X86_EFLAGS_VIF) flags |= X86_EFLAGS_IF; flags |= X86_EFLAGS_IOPL; return flags | (VEFLAGS & current->thread.vm86->veflags_mask); } static inline int is_revectored(int nr, struct revectored_struct *bitmap) { __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" :"=r" (nr) :"m" (*bitmap), "r" (nr)); return nr; } #define val_byte(val, n) (((__u8 *)&val)[n]) #define pushb(base, ptr, val, err_label) \ do { \ __u8 __val = val; \ ptr--; \ if (put_user(__val, base + ptr) < 0) \ goto err_label; \ } while (0) #define pushw(base, ptr, val, err_label) \ do { \ __u16 __val = val; \ ptr--; \ if (put_user(val_byte(__val, 1), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ } while (0) #define pushl(base, ptr, val, err_label) \ do { \ __u32 __val = val; \ ptr--; \ if (put_user(val_byte(__val, 3), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 2), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 1), base + ptr) < 0) \ goto err_label; \ ptr--; \ if (put_user(val_byte(__val, 0), base + ptr) < 0) \ goto err_label; \ } while (0) #define popb(base, ptr, err_label) \ ({ \ __u8 __res; \ if (get_user(__res, base + 
ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) #define popw(base, ptr, err_label) \ ({ \ __u16 __res; \ if (get_user(val_byte(__res, 0), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 1), base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) #define popl(base, ptr, err_label) \ ({ \ __u32 __res; \ if (get_user(val_byte(__res, 0), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 1), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 2), base + ptr) < 0) \ goto err_label; \ ptr++; \ if (get_user(val_byte(__res, 3), base + ptr) < 0) \ goto err_label; \ ptr++; \ __res; \ }) /* There are so many possible reasons for this function to return * VM86_INTx, so adding another doesn't bother me. We can expect * userspace programs to be able to handle it. (Getting a problem * in userspace is always better than an Oops anyway.) [KD] */ static void do_int(struct kernel_vm86_regs *regs, int i, unsigned char __user *ssp, unsigned short sp) { unsigned long __user *intr_ptr; unsigned long segoffs; struct vm86 *vm86 = current->thread.vm86; if (regs->pt.cs == BIOSSEG) goto cannot_handle; if (is_revectored(i, &vm86->int_revectored)) goto cannot_handle; if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored)) goto cannot_handle; intr_ptr = (unsigned long __user *) (i << 2); if (get_user(segoffs, intr_ptr)) goto cannot_handle; if ((segoffs >> 16) == BIOSSEG) goto cannot_handle; pushw(ssp, sp, get_vflags(regs), cannot_handle); pushw(ssp, sp, regs->pt.cs, cannot_handle); pushw(ssp, sp, IP(regs), cannot_handle); regs->pt.cs = segoffs >> 16; SP(regs) -= 6; IP(regs) = segoffs & 0xffff; clear_TF(regs); clear_IF(regs); clear_AC(regs); return; cannot_handle: save_v86_state(regs, VM86_INTx + (i << 8)); } int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) { struct vm86 *vm86 = current->thread.vm86; if (vm86->vm86plus.is_vm86pus) { if ((trapno == 3) || (trapno == 1)) { 
save_v86_state(regs, VM86_TRAP + (trapno << 8)); return 0; } do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); return 0; } if (trapno != 1) return 1; /* we let this handle by the calling routine */ current->thread.trap_nr = trapno; current->thread.error_code = error_code; force_sig(SIGTRAP, current); return 0; } void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) { unsigned char opcode; unsigned char __user *csp; unsigned char __user *ssp; unsigned short ip, sp, orig_flags; int data32, pref_done; struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus; #define CHECK_IF_IN_TRAP \ if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \ newflags |= X86_EFLAGS_TF orig_flags = *(unsigned short *)&regs->pt.flags; csp = (unsigned char __user *) (regs->pt.cs << 4); ssp = (unsigned char __user *) (regs->pt.ss << 4); sp = SP(regs); ip = IP(regs); data32 = 0; pref_done = 0; do { switch (opcode = popb(csp, ip, simulate_sigsegv)) { case 0x66: /* 32-bit data */ data32 = 1; break; case 0x67: /* 32-bit address */ break; case 0x2e: /* CS */ break; case 0x3e: /* DS */ break; case 0x26: /* ES */ break; case 0x36: /* SS */ break; case 0x65: /* GS */ break; case 0x64: /* FS */ break; case 0xf2: /* repnz */ break; case 0xf3: /* rep */ break; default: pref_done = 1; } } while (!pref_done); switch (opcode) { /* pushf */ case 0x9c: if (data32) { pushl(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 4; } else { pushw(ssp, sp, get_vflags(regs), simulate_sigsegv); SP(regs) -= 2; } IP(regs) = ip; goto vm86_fault_return; /* popf */ case 0x9d: { unsigned long newflags; if (data32) { newflags = popl(ssp, sp, simulate_sigsegv); SP(regs) += 4; } else { newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 2; } IP(regs) = ip; CHECK_IF_IN_TRAP; if (data32) set_vflags_long(newflags, regs); else set_vflags_short(newflags, regs); goto check_vip; } /* int xx */ case 0xcd: { int intno = popb(csp, ip, simulate_sigsegv); IP(regs) = ip; if 
(vmpi->vm86dbg_active) { if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) { save_v86_state(regs, VM86_INTx + (intno << 8)); return; } } do_int(regs, intno, ssp, sp); return; } /* iret */ case 0xcf: { unsigned long newip; unsigned long newcs; unsigned long newflags; if (data32) { newip = popl(ssp, sp, simulate_sigsegv); newcs = popl(ssp, sp, simulate_sigsegv); newflags = popl(ssp, sp, simulate_sigsegv); SP(regs) += 12; } else { newip = popw(ssp, sp, simulate_sigsegv); newcs = popw(ssp, sp, simulate_sigsegv); newflags = popw(ssp, sp, simulate_sigsegv); SP(regs) += 6; } IP(regs) = newip; regs->pt.cs = newcs; CHECK_IF_IN_TRAP; if (data32) { set_vflags_long(newflags, regs); } else { set_vflags_short(newflags, regs); } goto check_vip; } /* cli */ case 0xfa: IP(regs) = ip; clear_IF(regs); goto vm86_fault_return; /* sti */ /* * Damn. This is incorrect: the 'sti' instruction should actually * enable interrupts after the /next/ instruction. Not good. * * Probably needs some horsing around with the TF flag. Aiee.. */ case 0xfb: IP(regs) = ip; set_IF(regs); goto check_vip; default: save_v86_state(regs, VM86_UNKNOWN); } return; check_vip: if (VEFLAGS & X86_EFLAGS_VIP) { save_v86_state(regs, VM86_STI); return; } vm86_fault_return: if (vmpi->force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) { save_v86_state(regs, VM86_PICRETURN); return; } if (orig_flags & X86_EFLAGS_TF) handle_vm86_trap(regs, 0, X86_TRAP_DB); return; simulate_sigsegv: /* FIXME: After a long discussion with Stas we finally * agreed, that this is wrong. Here we should * really send a SIGSEGV to the user program. * But how do we create the correct context? We * are inside a general protection fault handler * and has just returned from a page fault handler. * The correct context for the signal handler * should be a mixture of the two, but how do we * get the information? 
[KD] */ save_v86_state(regs, VM86_UNKNOWN); } /* ---------------- vm86 special IRQ passing stuff ----------------- */ #define VM86_IRQNAME "vm86irq" static struct vm86_irqs { struct task_struct *tsk; int sig; } vm86_irqs[16]; static DEFINE_SPINLOCK(irqbits_lock); static int irqbits; #define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \ | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO) | (1 << SIGURG) \ | (1 << SIGUNUSED)) static irqreturn_t irq_handler(int intno, void *dev_id) { int irq_bit; unsigned long flags; spin_lock_irqsave(&irqbits_lock, flags); irq_bit = 1 << intno; if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk) goto out; irqbits |= irq_bit; if (vm86_irqs[intno].sig) send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1); /* * IRQ will be re-enabled when user asks for the irq (whether * polling or as a result of the signal) */ disable_irq_nosync(intno); spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_HANDLED; out: spin_unlock_irqrestore(&irqbits_lock, flags); return IRQ_NONE; } static inline void free_vm86_irq(int irqnumber) { unsigned long flags; free_irq(irqnumber, NULL); vm86_irqs[irqnumber].tsk = NULL; spin_lock_irqsave(&irqbits_lock, flags); irqbits &= ~(1 << irqnumber); spin_unlock_irqrestore(&irqbits_lock, flags); } void release_vm86_irqs(struct task_struct *task) { int i; for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++) if (vm86_irqs[i].tsk == task) free_vm86_irq(i); } static inline int get_and_reset_irq(int irqnumber) { int bit; unsigned long flags; int ret = 0; if (invalid_vm86_irq(irqnumber)) return 0; if (vm86_irqs[irqnumber].tsk != current) return 0; spin_lock_irqsave(&irqbits_lock, flags); bit = irqbits & (1 << irqnumber); irqbits &= ~bit; if (bit) { enable_irq(irqnumber); ret = 1; } spin_unlock_irqrestore(&irqbits_lock, flags); return ret; } static int do_vm86_irq_handling(int subfunction, int irqnumber) { int ret; switch (subfunction) { case VM86_GET_AND_RESET_IRQ: { return get_and_reset_irq(irqnumber); } case 
VM86_GET_IRQ_BITS: { return irqbits; } case VM86_REQUEST_IRQ: { int sig = irqnumber >> 8; int irq = irqnumber & 255; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM; if (invalid_vm86_irq(irq)) return -EPERM; if (vm86_irqs[irq].tsk) return -EPERM; ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL); if (ret) return ret; vm86_irqs[irq].sig = sig; vm86_irqs[irq].tsk = current; return irq; } case VM86_FREE_IRQ: { if (invalid_vm86_irq(irqnumber)) return -EPERM; if (!vm86_irqs[irqnumber].tsk) return 0; if (vm86_irqs[irqnumber].tsk != current) return -EPERM; free_vm86_irq(irqnumber); return 0; } } return -EINVAL; }
gpl-2.0
sandymanu/android_kernel_xiaomi_kenzo
fs/proc/fd.c
275
8320
#include <linux/sched.h> #include <linux/errno.h> #include <linux/dcache.h> #include <linux/path.h> #include <linux/fdtable.h> #include <linux/namei.h> #include <linux/pid.h> #include <linux/security.h> #include <linux/file.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include "internal.h" #include "fd.h" static int seq_show(struct seq_file *m, void *v) { struct files_struct *files = NULL; int f_flags = 0, ret = -ENOENT; struct file *file = NULL; struct task_struct *task; task = get_proc_task(m->private); if (!task) return -ENOENT; files = get_files_struct(task); put_task_struct(task); if (files) { int fd = proc_fd(m->private); spin_lock(&files->file_lock); file = fcheck_files(files, fd); if (file) { struct fdtable *fdt = files_fdtable(files); f_flags = file->f_flags; if (close_on_exec(fd, fdt)) f_flags |= O_CLOEXEC; get_file(file); ret = 0; } spin_unlock(&files->file_lock); put_files_struct(files); } if (!ret) { seq_printf(m, "pos:\t%lli\nflags:\t0%o\n", (long long)file->f_pos, f_flags); if (file->f_op->show_fdinfo) ret = file->f_op->show_fdinfo(m, file); fput(file); } return ret; } static int seq_fdinfo_open(struct inode *inode, struct file *file) { return single_open(file, seq_show, inode); } static const struct file_operations proc_fdinfo_file_operations = { .open = seq_fdinfo_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags) { struct files_struct *files; struct task_struct *task; const struct cred *cred; struct inode *inode; int fd; if (flags & LOOKUP_RCU) return -ECHILD; inode = dentry->d_inode; task = get_proc_task(inode); fd = proc_fd(inode); if (task) { files = get_files_struct(task); if (files) { struct file *file; rcu_read_lock(); file = fcheck_files(files, fd); if (file) { unsigned f_mode = file->f_mode; rcu_read_unlock(); put_files_struct(files); if (task_dumpable(task)) { rcu_read_lock(); cred = __task_cred(task); inode->i_uid = cred->euid; 
inode->i_gid = cred->egid; rcu_read_unlock(); } else { inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; } if (S_ISLNK(inode->i_mode)) { unsigned i_mode = S_IFLNK; if (f_mode & FMODE_READ) i_mode |= S_IRUSR | S_IXUSR; if (f_mode & FMODE_WRITE) i_mode |= S_IWUSR | S_IXUSR; inode->i_mode = i_mode; } security_task_to_inode(task, inode); put_task_struct(task); return 1; } rcu_read_unlock(); put_files_struct(files); } put_task_struct(task); } d_drop(dentry); return 0; } static const struct dentry_operations tid_fd_dentry_operations = { .d_revalidate = tid_fd_revalidate, .d_delete = pid_delete_dentry, }; static int proc_fd_link(struct dentry *dentry, struct path *path) { struct files_struct *files = NULL; struct task_struct *task; int ret = -ENOENT; task = get_proc_task(dentry->d_inode); if (task) { files = get_files_struct(task); put_task_struct(task); } if (files) { int fd = proc_fd(dentry->d_inode); struct file *fd_file; spin_lock(&files->file_lock); fd_file = fcheck_files(files, fd); if (fd_file) { *path = fd_file->f_path; path_get(&fd_file->f_path); ret = 0; } spin_unlock(&files->file_lock); put_files_struct(files); } return ret; } static struct dentry * proc_fd_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { struct dentry *error = ERR_PTR(-ENOENT); unsigned fd = (unsigned long)ptr; struct proc_inode *ei; struct inode *inode; inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) goto out; ei = PROC_I(inode); ei->fd = fd; inode->i_mode = S_IFLNK; inode->i_op = &proc_pid_link_inode_operations; inode->i_size = 64; ei->op.proc_get_link = proc_fd_link; d_set_d_op(dentry, &tid_fd_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (tid_fd_revalidate(dentry, 0)) error = NULL; out: return error; } static struct dentry *proc_lookupfd_common(struct inode *dir, struct dentry *dentry, instantiate_t instantiate) { struct task_struct *task = 
get_proc_task(dir); struct dentry *result = ERR_PTR(-ENOENT); unsigned fd = name_to_int(dentry); if (!task) goto out_no_task; if (fd == ~0U) goto out; result = instantiate(dir, dentry, task, (void *)(unsigned long)fd); out: put_task_struct(task); out_no_task: return result; } static int proc_readfd_common(struct file * filp, void * dirent, filldir_t filldir, instantiate_t instantiate) { struct dentry *dentry = filp->f_path.dentry; struct inode *inode = dentry->d_inode; struct task_struct *p = get_proc_task(inode); struct files_struct *files; unsigned int fd, ino; int retval; retval = -ENOENT; if (!p) goto out_no_task; retval = 0; fd = filp->f_pos; switch (fd) { case 0: if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) goto out; filp->f_pos++; case 1: ino = parent_ino(dentry); if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0) goto out; filp->f_pos++; default: files = get_files_struct(p); if (!files) goto out; rcu_read_lock(); for (fd = filp->f_pos - 2; fd < files_fdtable(files)->max_fds; fd++, filp->f_pos++) { char name[PROC_NUMBUF]; int len; int rv; if (!fcheck_files(files, fd)) continue; rcu_read_unlock(); len = snprintf(name, sizeof(name), "%d", fd); rv = proc_fill_cache(filp, dirent, filldir, name, len, instantiate, p, (void *)(unsigned long)fd); if (rv < 0) goto out_fd_loop; rcu_read_lock(); } rcu_read_unlock(); out_fd_loop: put_files_struct(files); } out: put_task_struct(p); out_no_task: return retval; } static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir) { return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate); } const struct file_operations proc_fd_operations = { .read = generic_read_dir, .readdir = proc_readfd, .llseek = default_llseek, }; static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry, unsigned int flags) { return proc_lookupfd_common(dir, dentry, proc_fd_instantiate); } /* * /proc/pid/fd needs a special permission handler so that a process can still * access /proc/self/fd after it 
has executed a setuid(). */ int proc_fd_permission(struct inode *inode, int mask) { struct task_struct *p; int rv; rv = generic_permission(inode, mask); if (rv == 0) return rv; rcu_read_lock(); p = pid_task(proc_pid(inode), PIDTYPE_PID); if (p && same_thread_group(p, current)) rv = 0; rcu_read_unlock(); return rv; } const struct inode_operations proc_fd_inode_operations = { .lookup = proc_lookupfd, .permission = proc_fd_permission, .setattr = proc_setattr, }; static struct dentry * proc_fdinfo_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { struct dentry *error = ERR_PTR(-ENOENT); unsigned fd = (unsigned long)ptr; struct proc_inode *ei; struct inode *inode; inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) goto out; ei = PROC_I(inode); ei->fd = fd; inode->i_mode = S_IFREG | S_IRUSR; inode->i_fop = &proc_fdinfo_file_operations; d_set_d_op(dentry, &tid_fd_dentry_operations); d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (tid_fd_revalidate(dentry, 0)) error = NULL; out: return error; } static struct dentry * proc_lookupfdinfo(struct inode *dir, struct dentry *dentry, unsigned int flags) { return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate); } static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir) { return proc_readfd_common(filp, dirent, filldir, proc_fdinfo_instantiate); } const struct inode_operations proc_fdinfo_inode_operations = { .lookup = proc_lookupfdinfo, .setattr = proc_setattr, }; const struct file_operations proc_fdinfo_operations = { .read = generic_read_dir, .readdir = proc_readfdinfo, .llseek = default_llseek, };
gpl-2.0
Evil-Green/Lonas_KL
arch/arm/mach-s3c64xx/mach-smdk6410.c
531
17889
/* linux/arch/arm/mach-s3c64xx/mach-smdk6410.c * * Copyright 2008 Openmoko, Inc. * Copyright 2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/input.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/leds.h> #include <linux/fb.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/smsc911x.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> #include <linux/pwm_backlight.h> #ifdef CONFIG_SMDK6410_WM1190_EV1 #include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/pmic.h> #endif #ifdef CONFIG_SMDK6410_WM1192_EV1 #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> #endif #include <video/platform_lcd.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <plat/regs-serial.h> #include <mach/regs-modem.h> #include <mach/regs-gpio.h> #include <mach/regs-sys.h> #include <mach/regs-srom.h> #include <plat/ata.h> #include <plat/iic.h> #include <plat/fb.h> #include <plat/gpio-cfg.h> #include <mach/s3c6410.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/adc.h> #include <plat/ts.h> #include <plat/keypad.h> #include <plat/backlight.h> #include <plat/regs-fb-v4.h> #define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK #define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB #define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE static struct s3c2410_uartcfg 
smdk6410_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [1] = { .hwport = 1, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [2] = { .hwport = 2, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, [3] = { .hwport = 3, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, }, }; /* framebuffer and LCD setup. */ /* GPF15 = LCD backlight control * GPF13 => Panel power * GPN5 = LCD nRESET signal * PWM_TOUT1 => backlight brightness */ static void smdk6410_lcd_power_set(struct plat_lcd_data *pd, unsigned int power) { if (power) { gpio_direction_output(S3C64XX_GPF(13), 1); /* fire nRESET on power up */ gpio_direction_output(S3C64XX_GPN(5), 0); msleep(10); gpio_direction_output(S3C64XX_GPN(5), 1); msleep(1); } else { gpio_direction_output(S3C64XX_GPF(13), 0); } } static struct plat_lcd_data smdk6410_lcd_power_data = { .set_power = smdk6410_lcd_power_set, }; static struct platform_device smdk6410_lcd_powerdev = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &smdk6410_lcd_power_data, }; static struct s3c_fb_pd_win smdk6410_fb_win0 = { /* this is to ensure we use win0 */ .win_mode = { .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 16, .virtual_y = 480 * 2, .virtual_x = 800, }; /* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ static struct s3c_fb_platdata smdk6410_lcd_pdata __initdata = { .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, .win[0] = &smdk6410_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, }; /* * Configuring Ethernet on SMDK6410 * * Both CS8900A and LAN9115 chips share one chip select mediated by CFG6. 
* The constant address below corresponds to nCS1 * * 1) Set CFGB2 p3 ON others off, no other CFGB selects "ethernet" * 2) CFG6 needs to be switched to "LAN9115" side */ static struct resource smdk6410_smsc911x_resources[] = { [0] = { .start = S3C64XX_PA_XM0CSN1, .end = S3C64XX_PA_XM0CSN1 + SZ_64K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = S3C_EINT(10), .end = S3C_EINT(10), .flags = IORESOURCE_IRQ | IRQ_TYPE_LEVEL_LOW, }, }; static struct smsc911x_platform_config smdk6410_smsc911x_pdata = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .flags = SMSC911X_USE_32BIT | SMSC911X_FORCE_INTERNAL_PHY, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device smdk6410_smsc911x = { .name = "smsc911x", .id = -1, .num_resources = ARRAY_SIZE(smdk6410_smsc911x_resources), .resource = &smdk6410_smsc911x_resources[0], .dev = { .platform_data = &smdk6410_smsc911x_pdata, }, }; #ifdef CONFIG_REGULATOR static struct regulator_consumer_supply smdk6410_b_pwr_5v_consumers[] __initdata = { REGULATOR_SUPPLY("PVDD", "0-001b"), REGULATOR_SUPPLY("AVDD", "0-001b"), }; static struct regulator_init_data smdk6410_b_pwr_5v_data = { .constraints = { .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(smdk6410_b_pwr_5v_consumers), .consumer_supplies = smdk6410_b_pwr_5v_consumers, }; static struct fixed_voltage_config smdk6410_b_pwr_5v_pdata = { .supply_name = "B_PWR_5V", .microvolts = 5000000, .init_data = &smdk6410_b_pwr_5v_data, .gpio = -EINVAL, }; static struct platform_device smdk6410_b_pwr_5v = { .name = "reg-fixed-voltage", .id = -1, .dev = { .platform_data = &smdk6410_b_pwr_5v_pdata, }, }; #endif static struct s3c_ide_platdata smdk6410_ide_pdata __initdata = { .setup_gpio = s3c64xx_ide_setup_gpio, }; static uint32_t smdk6410_keymap[] __initdata = { /* KEY(row, col, keycode) */ KEY(0, 3, KEY_1), KEY(0, 4, KEY_2), KEY(0, 5, KEY_3), KEY(0, 6, KEY_4), KEY(0, 7, KEY_5), KEY(1, 3, KEY_A), KEY(1, 4, KEY_B), KEY(1, 5, KEY_C), 
KEY(1, 6, KEY_D), KEY(1, 7, KEY_E) }; static struct matrix_keymap_data smdk6410_keymap_data __initdata = { .keymap = smdk6410_keymap, .keymap_size = ARRAY_SIZE(smdk6410_keymap), }; static struct samsung_keypad_platdata smdk6410_keypad_data __initdata = { .keymap_data = &smdk6410_keymap_data, .rows = 2, .cols = 8, }; static int smdk6410_backlight_init(struct device *dev) { int ret; ret = gpio_request(S3C64XX_GPF(15), "Backlight"); if (ret) { printk(KERN_ERR "failed to request GPF for PWM-OUT1\n"); return ret; } /* Configure GPIO pin with S3C64XX_GPF15_PWM_TOUT1 */ s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_SFN(2)); return 0; } static void smdk6410_backlight_exit(struct device *dev) { s3c_gpio_cfgpin(S3C64XX_GPF(15), S3C_GPIO_OUTPUT); gpio_free(S3C64XX_GPF(15)); } static struct platform_pwm_backlight_data smdk6410_backlight_data = { .pwm_id = 1, .max_brightness = 255, .dft_brightness = 255, .pwm_period_ns = 78770, .init = smdk6410_backlight_init, .exit = smdk6410_backlight_exit, }; static struct platform_device smdk6410_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &s3c_device_timer[1].dev, .platform_data = &smdk6410_backlight_data, }, }; static struct map_desc smdk6410_iodesc[] = {}; static struct platform_device *smdk6410_devices[] __initdata = { #ifdef CONFIG_SMDK6410_SD_CH0 &s3c_device_hsmmc0, #endif #ifdef CONFIG_SMDK6410_SD_CH1 &s3c_device_hsmmc1, #endif &s3c_device_i2c0, &s3c_device_i2c1, &s3c_device_fb, &s3c_device_ohci, &s3c_device_usb_hsotg, &samsung_asoc_dma, &s3c64xx_device_iisv4, &samsung_device_keypad, #ifdef CONFIG_REGULATOR &smdk6410_b_pwr_5v, #endif &smdk6410_lcd_powerdev, &smdk6410_smsc911x, &s3c_device_adc, &s3c_device_cfcon, &s3c_device_rtc, &s3c_device_ts, &s3c_device_wdt, }; #ifdef CONFIG_REGULATOR /* ARM core */ static struct regulator_consumer_supply smdk6410_vddarm_consumers[] = { REGULATOR_SUPPLY("vddarm", NULL), }; /* VDDARM, BUCK1 on J5 */ static struct regulator_init_data smdk6410_vddarm = { .constraints = { .name = 
"PVDD_ARM", .min_uV = 1000000, .max_uV = 1300000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, .num_consumer_supplies = ARRAY_SIZE(smdk6410_vddarm_consumers), .consumer_supplies = smdk6410_vddarm_consumers, }; /* VDD_INT, BUCK2 on J5 */ static struct regulator_init_data smdk6410_vddint = { .constraints = { .name = "PVDD_INT", .min_uV = 1000000, .max_uV = 1200000, .always_on = 1, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, }, }; /* VDD_HI, LDO3 on J5 */ static struct regulator_init_data smdk6410_vddhi = { .constraints = { .name = "PVDD_HI", .always_on = 1, }, }; /* VDD_PLL, LDO2 on J5 */ static struct regulator_init_data smdk6410_vddpll = { .constraints = { .name = "PVDD_PLL", .always_on = 1, }, }; /* VDD_UH_MMC, LDO5 on J5 */ static struct regulator_init_data smdk6410_vdduh_mmc = { .constraints = { .name = "PVDD_UH+PVDD_MMC", .always_on = 1, }, }; /* VCCM3BT, LDO8 on J5 */ static struct regulator_init_data smdk6410_vccmc3bt = { .constraints = { .name = "PVCCM3BT", .always_on = 1, }, }; /* VCCM2MTV, LDO11 on J5 */ static struct regulator_init_data smdk6410_vccm2mtv = { .constraints = { .name = "PVCCM2MTV", .always_on = 1, }, }; /* VDD_LCD, LDO12 on J5 */ static struct regulator_init_data smdk6410_vddlcd = { .constraints = { .name = "PVDD_LCD", .always_on = 1, }, }; /* VDD_OTGI, LDO9 on J5 */ static struct regulator_init_data smdk6410_vddotgi = { .constraints = { .name = "PVDD_OTGI", .always_on = 1, }, }; /* VDD_OTG, LDO14 on J5 */ static struct regulator_init_data smdk6410_vddotg = { .constraints = { .name = "PVDD_OTG", .always_on = 1, }, }; /* VDD_ALIVE, LDO15 on J5 */ static struct regulator_init_data smdk6410_vddalive = { .constraints = { .name = "PVDD_ALIVE", .always_on = 1, }, }; /* VDD_AUDIO, VLDO_AUDIO on J5 */ static struct regulator_init_data smdk6410_vddaudio = { .constraints = { .name = "PVDD_AUDIO", .always_on = 1, }, }; #endif #ifdef CONFIG_SMDK6410_WM1190_EV1 /* S3C64xx internal logic & PLL */ static struct regulator_init_data 
wm8350_dcdc1_data = { .constraints = { .name = "PVDD_INT+PVDD_PLL", .min_uV = 1200000, .max_uV = 1200000, .always_on = 1, .apply_uV = 1, }, }; /* Memory */ static struct regulator_init_data wm8350_dcdc3_data = { .constraints = { .name = "PVDD_MEM", .min_uV = 1800000, .max_uV = 1800000, .always_on = 1, .state_mem = { .uV = 1800000, .mode = REGULATOR_MODE_NORMAL, .enabled = 1, }, .initial_state = PM_SUSPEND_MEM, }, }; /* USB, EXT, PCM, ADC/DAC, USB, MMC */ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = { REGULATOR_SUPPLY("DVDD", "0-001b"), }; static struct regulator_init_data wm8350_dcdc4_data = { .constraints = { .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV", .min_uV = 3000000, .max_uV = 3000000, .always_on = 1, }, .num_consumer_supplies = ARRAY_SIZE(wm8350_dcdc4_consumers), .consumer_supplies = wm8350_dcdc4_consumers, }; /* OTGi/1190-EV1 HPVDD & AVDD */ static struct regulator_init_data wm8350_ldo4_data = { .constraints = { .name = "PVDD_OTGI+HPVDD+AVDD", .min_uV = 1200000, .max_uV = 1200000, .apply_uV = 1, .always_on = 1, }, }; static struct { int regulator; struct regulator_init_data *initdata; } wm1190_regulators[] = { { WM8350_DCDC_1, &wm8350_dcdc1_data }, { WM8350_DCDC_3, &wm8350_dcdc3_data }, { WM8350_DCDC_4, &wm8350_dcdc4_data }, { WM8350_DCDC_6, &smdk6410_vddarm }, { WM8350_LDO_1, &smdk6410_vddalive }, { WM8350_LDO_2, &smdk6410_vddotg }, { WM8350_LDO_3, &smdk6410_vddlcd }, { WM8350_LDO_4, &wm8350_ldo4_data }, }; static int __init smdk6410_wm8350_init(struct wm8350 *wm8350) { int i; /* Configure the IRQ line */ s3c_gpio_setpull(S3C64XX_GPN(12), S3C_GPIO_PULL_UP); /* Instantiate the regulators */ for (i = 0; i < ARRAY_SIZE(wm1190_regulators); i++) wm8350_register_regulator(wm8350, wm1190_regulators[i].regulator, wm1190_regulators[i].initdata); return 0; } static struct wm8350_platform_data __initdata smdk6410_wm8350_pdata = { .init = smdk6410_wm8350_init, .irq_high = 1, .irq_base = IRQ_BOARD_START, }; #endif #ifdef 
CONFIG_SMDK6410_WM1192_EV1 static struct gpio_led wm1192_pmic_leds[] = { { .name = "PMIC:red:power", .gpio = GPIO_BOARD_START + 3, .default_state = LEDS_GPIO_DEFSTATE_ON, }, }; static struct gpio_led_platform_data wm1192_pmic_led = { .num_leds = ARRAY_SIZE(wm1192_pmic_leds), .leds = wm1192_pmic_leds, }; static struct platform_device wm1192_pmic_led_dev = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &wm1192_pmic_led, }, }; static int wm1192_pre_init(struct wm831x *wm831x) { int ret; /* Configure the IRQ line */ s3c_gpio_setpull(S3C64XX_GPN(12), S3C_GPIO_PULL_UP); ret = platform_device_register(&wm1192_pmic_led_dev); if (ret != 0) dev_err(wm831x->dev, "Failed to add PMIC LED: %d\n", ret); return 0; } static struct wm831x_backlight_pdata wm1192_backlight_pdata = { .isink = 1, .max_uA = 27554, }; static struct regulator_init_data wm1192_dcdc3 = { .constraints = { .name = "PVDD_MEM+PVDD_GPS", .always_on = 1, }, }; static struct regulator_consumer_supply wm1192_ldo1_consumers[] = { REGULATOR_SUPPLY("DVDD", "0-001b"), /* WM8580 */ }; static struct regulator_init_data wm1192_ldo1 = { .constraints = { .name = "PVDD_LCD+PVDD_EXT", .always_on = 1, }, .consumer_supplies = wm1192_ldo1_consumers, .num_consumer_supplies = ARRAY_SIZE(wm1192_ldo1_consumers), }; static struct wm831x_status_pdata wm1192_led7_pdata = { .name = "LED7:green:", }; static struct wm831x_status_pdata wm1192_led8_pdata = { .name = "LED8:green:", }; static struct wm831x_pdata smdk6410_wm1192_pdata = { .pre_init = wm1192_pre_init, .irq_base = IRQ_BOARD_START, .backlight = &wm1192_backlight_pdata, .dcdc = { &smdk6410_vddarm, /* DCDC1 */ &smdk6410_vddint, /* DCDC2 */ &wm1192_dcdc3, }, .gpio_base = GPIO_BOARD_START, .ldo = { &wm1192_ldo1, /* LDO1 */ &smdk6410_vdduh_mmc, /* LDO2 */ NULL, /* LDO3 NC */ &smdk6410_vddotgi, /* LDO4 */ &smdk6410_vddotg, /* LDO5 */ &smdk6410_vddhi, /* LDO6 */ &smdk6410_vddaudio, /* LDO7 */ &smdk6410_vccm2mtv, /* LDO8 */ &smdk6410_vddpll, /* LDO9 */ &smdk6410_vccmc3bt, /* 
LDO10 */ &smdk6410_vddalive, /* LDO11 */ }, .status = { &wm1192_led7_pdata, &wm1192_led8_pdata, }, }; #endif static struct i2c_board_info i2c_devs0[] __initdata = { { I2C_BOARD_INFO("24c08", 0x50), }, { I2C_BOARD_INFO("wm8580", 0x1b), }, #ifdef CONFIG_SMDK6410_WM1192_EV1 { I2C_BOARD_INFO("wm8312", 0x34), .platform_data = &smdk6410_wm1192_pdata, .irq = S3C_EINT(12), }, #endif #ifdef CONFIG_SMDK6410_WM1190_EV1 { I2C_BOARD_INFO("wm8350", 0x1a), .platform_data = &smdk6410_wm8350_pdata, .irq = S3C_EINT(12), }, #endif }; static struct i2c_board_info i2c_devs1[] __initdata = { { I2C_BOARD_INFO("24c128", 0x57), }, /* Samsung S524AD0XD1 */ }; static struct s3c2410_ts_mach_info s3c_ts_platform __initdata = { .delay = 10000, .presc = 49, .oversampling_shift = 2, }; /* LCD Backlight data */ static struct samsung_bl_gpio_info smdk6410_bl_gpio_info = { .no = S3C64XX_GPF(15), .func = S3C_GPIO_SFN(2), }; static struct platform_pwm_backlight_data smdk6410_bl_data = { .pwm_id = 1, }; static void __init smdk6410_map_io(void) { u32 tmp; s3c64xx_init_io(smdk6410_iodesc, ARRAY_SIZE(smdk6410_iodesc)); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(smdk6410_uartcfgs, ARRAY_SIZE(smdk6410_uartcfgs)); /* set the LCD type */ tmp = __raw_readl(S3C64XX_SPCON); tmp &= ~S3C64XX_SPCON_LCD_SEL_MASK; tmp |= S3C64XX_SPCON_LCD_SEL_RGB; __raw_writel(tmp, S3C64XX_SPCON); /* remove the lcd bypass */ tmp = __raw_readl(S3C64XX_MODEM_MIFPCON); tmp &= ~MIFPCON_LCD_BYPASS; __raw_writel(tmp, S3C64XX_MODEM_MIFPCON); } static void __init smdk6410_machine_init(void) { u32 cs1; s3c_i2c0_set_platdata(NULL); s3c_i2c1_set_platdata(NULL); s3c_fb_set_platdata(&smdk6410_lcd_pdata); samsung_keypad_set_platdata(&smdk6410_keypad_data); s3c24xx_ts_set_platdata(&s3c_ts_platform); /* configure nCS1 width to 16 bits */ cs1 = __raw_readl(S3C64XX_SROM_BW) & ~(S3C64XX_SROM_BW__CS_MASK << S3C64XX_SROM_BW__NCS1__SHIFT); cs1 |= ((1 << S3C64XX_SROM_BW__DATAWIDTH__SHIFT) | (1 << S3C64XX_SROM_BW__WAITENABLE__SHIFT) | (1 << 
S3C64XX_SROM_BW__BYTEENABLE__SHIFT)) << S3C64XX_SROM_BW__NCS1__SHIFT; __raw_writel(cs1, S3C64XX_SROM_BW); /* set timing for nCS1 suitable for ethernet chip */ __raw_writel((0 << S3C64XX_SROM_BCX__PMC__SHIFT) | (6 << S3C64XX_SROM_BCX__TACP__SHIFT) | (4 << S3C64XX_SROM_BCX__TCAH__SHIFT) | (1 << S3C64XX_SROM_BCX__TCOH__SHIFT) | (0xe << S3C64XX_SROM_BCX__TACC__SHIFT) | (4 << S3C64XX_SROM_BCX__TCOS__SHIFT) | (0 << S3C64XX_SROM_BCX__TACS__SHIFT), S3C64XX_SROM_BC1); gpio_request(S3C64XX_GPN(5), "LCD power"); gpio_request(S3C64XX_GPF(13), "LCD power"); i2c_register_board_info(0, i2c_devs0, ARRAY_SIZE(i2c_devs0)); i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1)); s3c_ide_set_platdata(&smdk6410_ide_pdata); samsung_bl_set(&smdk6410_bl_gpio_info, &smdk6410_bl_data); platform_add_devices(smdk6410_devices, ARRAY_SIZE(smdk6410_devices)); } MACHINE_START(SMDK6410, "SMDK6410") /* Maintainer: Ben Dooks <ben-linux@fluff.org> */ .boot_params = S3C64XX_PA_SDRAM + 0x100, .init_irq = s3c6410_init_irq, .map_io = smdk6410_map_io, .init_machine = smdk6410_machine_init, .timer = &s3c24xx_timer, MACHINE_END
gpl-2.0
tejaswanjari/SMR_FS-EXT4
kernel/drivers/net/wireless/iwlwifi/dvm/rxon.c
1043
45782
/****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * *****************************************************************************/ #include <linux/etherdevice.h> #include "iwl-trans.h" #include "iwl-modparams.h" #include "dev.h" #include "agn.h" #include "calib.h" /* * initialize rxon structure with default values from eeprom */ void iwl_connection_init_rx_config(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { memset(&ctx->staging, 0, sizeof(ctx->staging)); if (!ctx->vif) { ctx->staging.dev_type = ctx->unused_devtype; } else switch (ctx->vif->type) { case NL80211_IFTYPE_AP: ctx->staging.dev_type = ctx->ap_devtype; break; case NL80211_IFTYPE_STATION: ctx->staging.dev_type = ctx->station_devtype; ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; break; case NL80211_IFTYPE_ADHOC: ctx->staging.dev_type = ctx->ibss_devtype; ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK; break; case 
NL80211_IFTYPE_MONITOR: ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER; break; default: IWL_ERR(priv, "Unsupported interface type %d\n", ctx->vif->type); break; } #if 0 /* TODO: Figure out when short_preamble would be set and cache from * that */ if (!hw_to_local(priv->hw)->short_preamble) ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; else ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; #endif ctx->staging.channel = cpu_to_le16(priv->hw->conf.chandef.chan->hw_value); priv->band = priv->hw->conf.chandef.chan->band; iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); /* clear both MIX and PURE40 mode flag */ ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40); if (ctx->vif) memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; } static int iwlagn_disable_bss(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_rxon_cmd *send) { __le32 old_filter = send->filter_flags; int ret; send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, sizeof(*send), send); send->filter_flags = old_filter; if (ret) IWL_DEBUG_QUIET_RFKILL(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret); return ret; } static int iwlagn_disable_pan(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_rxon_cmd *send) { struct iwl_notification_wait disable_wait; __le32 old_filter = send->filter_flags; u8 old_dev_type = send->dev_type; int ret; static const u8 deactivate_cmd[] = { REPLY_WIPAN_DEACTIVATION_COMPLETE }; iwl_init_notification_wait(&priv->notif_wait, &disable_wait, deactivate_cmd, ARRAY_SIZE(deactivate_cmd), NULL, NULL); send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; send->dev_type = RXON_DEV_TYPE_P2P; ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, sizeof(*send), send); send->filter_flags = old_filter; send->dev_type = 
old_dev_type; if (ret) { IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); iwl_remove_notification(&priv->notif_wait, &disable_wait); } else { ret = iwl_wait_notification(&priv->notif_wait, &disable_wait, HZ); if (ret) IWL_ERR(priv, "Timed out waiting for PAN disable\n"); } return ret; } static int iwlagn_disconn_pan(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_rxon_cmd *send) { __le32 old_filter = send->filter_flags; int ret; send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, sizeof(*send), send); send->filter_flags = old_filter; return ret; } static void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { int ret; if (!ctx->is_active) return; ctx->qos_data.def_qos_parm.qos_flags = 0; if (ctx->qos_data.qos_active) ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_UPDATE_EDCA_MSK; if (ctx->ht.enabled) ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags); ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0, sizeof(struct iwl_qosparam_cmd), &ctx->qos_data.def_qos_parm); if (ret) IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); } static int iwlagn_update_beacon(struct iwl_priv *priv, struct ieee80211_vif *vif) { lockdep_assert_held(&priv->mutex); dev_kfree_skb(priv->beacon_skb); priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif); if (!priv->beacon_skb) return -ENOMEM; return iwlagn_send_beacon_cmd(priv); } static int iwlagn_send_rxon_assoc(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { int ret = 0; struct iwl_rxon_assoc_cmd rxon_assoc; const struct iwl_rxon_cmd *rxon1 = &ctx->staging; const struct iwl_rxon_cmd *rxon2 = &ctx->active; if ((rxon1->flags == rxon2->flags) && (rxon1->filter_flags == rxon2->filter_flags) && (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && (rxon1->ofdm_ht_single_stream_basic_rates == 
rxon2->ofdm_ht_single_stream_basic_rates) && (rxon1->ofdm_ht_dual_stream_basic_rates == rxon2->ofdm_ht_dual_stream_basic_rates) && (rxon1->ofdm_ht_triple_stream_basic_rates == rxon2->ofdm_ht_triple_stream_basic_rates) && (rxon1->acquisition_data == rxon2->acquisition_data) && (rxon1->rx_chain == rxon2->rx_chain) && (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n"); return 0; } rxon_assoc.flags = ctx->staging.flags; rxon_assoc.filter_flags = ctx->staging.filter_flags; rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; rxon_assoc.reserved1 = 0; rxon_assoc.reserved2 = 0; rxon_assoc.reserved3 = 0; rxon_assoc.ofdm_ht_single_stream_basic_rates = ctx->staging.ofdm_ht_single_stream_basic_rates; rxon_assoc.ofdm_ht_dual_stream_basic_rates = ctx->staging.ofdm_ht_dual_stream_basic_rates; rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; rxon_assoc.ofdm_ht_triple_stream_basic_rates = ctx->staging.ofdm_ht_triple_stream_basic_rates; rxon_assoc.acquisition_data = ctx->staging.acquisition_data; ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd, CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc); return ret; } static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) { u16 new_val; u16 beacon_factor; /* * If mac80211 hasn't given us a beacon interval, program * the default into the device (not checking this here * would cause the adjustment below to return the maximum * value, which may break PAN.) */ if (!beacon_val) return DEFAULT_BEACON_INTERVAL; /* * If the beacon interval we obtained from the peer * is too large, we'll have to wake up more often * (and in IBSS case, we'll beacon too much) * * For example, if max_beacon_val is 4096, and the * requested beacon interval is 7000, we'll have to * use 3500 to be able to wake up on the beacons. * * This could badly influence beacon detection stats. 
*/ beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; new_val = beacon_val / beacon_factor; if (!new_val) new_val = max_beacon_val; return new_val; } static int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { u64 tsf; s32 interval_tm, rem; struct ieee80211_conf *conf = NULL; u16 beacon_int; struct ieee80211_vif *vif = ctx->vif; conf = &priv->hw->conf; lockdep_assert_held(&priv->mutex); memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); ctx->timing.timestamp = cpu_to_le64(priv->timestamp); ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); beacon_int = vif ? vif->bss_conf.beacon_int : 0; /* * TODO: For IBSS we need to get atim_window from mac80211, * for now just always use 0 */ ctx->timing.atim_window = 0; if (ctx->ctxid == IWL_RXON_CTX_PAN && (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) && iwl_is_associated(priv, IWL_RXON_CTX_BSS) && priv->contexts[IWL_RXON_CTX_BSS].vif && priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) { ctx->timing.beacon_interval = priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval; beacon_int = le16_to_cpu(ctx->timing.beacon_interval); } else if (ctx->ctxid == IWL_RXON_CTX_BSS && iwl_is_associated(priv, IWL_RXON_CTX_PAN) && priv->contexts[IWL_RXON_CTX_PAN].vif && priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int && (!iwl_is_associated_ctx(ctx) || !ctx->vif || !ctx->vif->bss_conf.beacon_int)) { ctx->timing.beacon_interval = priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval; beacon_int = le16_to_cpu(ctx->timing.beacon_interval); } else { beacon_int = iwl_adjust_beacon_interval(beacon_int, IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT); ctx->timing.beacon_interval = cpu_to_le16(beacon_int); } ctx->beacon_int = beacon_int; tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ interval_tm = beacon_int * TIME_UNIT; rem = do_div(tsf, interval_tm); ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); ctx->timing.dtim_period = vif ? 
(vif->bss_conf.dtim_period ?: 1) : 1; IWL_DEBUG_ASSOC(priv, "beacon interval %d beacon timer %d beacon tim %d\n", le16_to_cpu(ctx->timing.beacon_interval), le32_to_cpu(ctx->timing.beacon_init_val), le16_to_cpu(ctx->timing.atim_window)); return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd, 0, sizeof(ctx->timing), &ctx->timing); } static int iwlagn_rxon_disconn(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { int ret; struct iwl_rxon_cmd *active = (void *)&ctx->active; if (ctx->ctxid == IWL_RXON_CTX_BSS) { ret = iwlagn_disable_bss(priv, ctx, &ctx->staging); } else { ret = iwlagn_disable_pan(priv, ctx, &ctx->staging); if (ret) return ret; if (ctx->vif) { ret = iwl_send_rxon_timing(priv, ctx); if (ret) { IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); return ret; } ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging); } } if (ret) return ret; /* * Un-assoc RXON clears the station table and WEP * keys, so we have to restore those afterwards. */ iwl_clear_ucode_stations(priv, ctx); /* update -- might need P2P now */ iwl_update_bcast_station(priv, ctx); iwl_restore_stations(priv, ctx); ret = iwl_restore_default_wep_keys(priv, ctx); if (ret) { IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); return ret; } memcpy(active, &ctx->staging, sizeof(*active)); return 0; } static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) { int ret; s8 prev_tx_power; bool defer; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED) return 0; lockdep_assert_held(&priv->mutex); if (priv->tx_power_user_lmt == tx_power && !force) return 0; if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n", tx_power, IWLAGN_TX_POWER_TARGET_POWER_MIN); return -EINVAL; } if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) { IWL_WARN(priv, "Requested user TXPOWER %d above upper limit %d.\n", tx_power, 
priv->nvm_data->max_tx_pwr_half_dbm); return -EINVAL; } if (!iwl_is_ready_rf(priv)) return -EIO; /* scan complete and commit_rxon use tx_power_next value, * it always need to be updated for newest request */ priv->tx_power_next = tx_power; /* do not set tx power when scanning or channel changing */ defer = test_bit(STATUS_SCANNING, &priv->status) || memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); if (defer && !force) { IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); return 0; } prev_tx_power = priv->tx_power_user_lmt; priv->tx_power_user_lmt = tx_power; ret = iwlagn_send_tx_power(priv); /* if fail to set tx_power, restore the orig. tx power */ if (ret) { priv->tx_power_user_lmt = prev_tx_power; priv->tx_power_next = prev_tx_power; } return ret; } static int iwlagn_rxon_connect(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { int ret; struct iwl_rxon_cmd *active = (void *)&ctx->active; /* RXON timing must be before associated RXON */ if (ctx->ctxid == IWL_RXON_CTX_BSS) { ret = iwl_send_rxon_timing(priv, ctx); if (ret) { IWL_ERR(priv, "Failed to send timing (%d)!\n", ret); return ret; } } /* QoS info may be cleared by previous un-assoc RXON */ iwlagn_update_qos(priv, ctx); /* * We'll run into this code path when beaconing is * enabled, but then we also need to send the beacon * to the device. */ if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) { ret = iwlagn_update_beacon(priv, ctx->vif); if (ret) { IWL_ERR(priv, "Error sending required beacon (%d)!\n", ret); return ret; } } priv->start_calib = 0; /* * Apply the new configuration. * * Associated RXON doesn't clear the station table in uCode, * so we don't need to restore stations etc. after this. 
*/ ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, sizeof(struct iwl_rxon_cmd), &ctx->staging); if (ret) { IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); return ret; } memcpy(active, &ctx->staging, sizeof(*active)); /* IBSS beacon needs to be sent after setting assoc */ if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC)) if (iwlagn_update_beacon(priv, ctx->vif)) IWL_ERR(priv, "Error sending IBSS beacon\n"); iwl_init_sensitivity(priv); /* * If we issue a new RXON command which required a tune then * we must send a new TXPOWER command or we won't be able to * Tx any frames. * * It's expected we set power here if channel is changing. */ ret = iwl_set_tx_power(priv, priv->tx_power_next, true); if (ret) { IWL_ERR(priv, "Error sending TX power (%d)\n", ret); return ret; } if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && priv->cfg->ht_params && priv->cfg->ht_params->smps_mode) ieee80211_request_smps(ctx->vif, priv->cfg->ht_params->smps_mode); return 0; } int iwlagn_set_pan_params(struct iwl_priv *priv) { struct iwl_wipan_params_cmd cmd; struct iwl_rxon_context *ctx_bss, *ctx_pan; int slot0 = 300, slot1 = 0; int ret; if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS)) return 0; BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); lockdep_assert_held(&priv->mutex); ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS]; ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN]; /* * If the PAN context is inactive, then we don't need * to update the PAN parameters, the last thing we'll * have done before it goes inactive is making the PAN * parameters be WLAN-only. */ if (!ctx_pan->is_active) return 0; memset(&cmd, 0, sizeof(cmd)); /* only 2 slots are currently allowed */ cmd.num_slots = 2; cmd.slots[0].type = 0; /* BSS */ cmd.slots[1].type = 1; /* PAN */ if (ctx_bss->vif && ctx_pan->vif) { int bcnint = ctx_pan->beacon_int; int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; /* should be set, but seems unused?? 
*/ cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE); if (ctx_pan->vif->type == NL80211_IFTYPE_AP && bcnint && bcnint != ctx_bss->beacon_int) { IWL_ERR(priv, "beacon intervals don't match (%d, %d)\n", ctx_bss->beacon_int, ctx_pan->beacon_int); } else bcnint = max_t(int, bcnint, ctx_bss->beacon_int); if (!bcnint) bcnint = DEFAULT_BEACON_INTERVAL; slot0 = bcnint / 2; slot1 = bcnint - slot0; if (test_bit(STATUS_SCAN_HW, &priv->status) || (!ctx_bss->vif->bss_conf.idle && !ctx_bss->vif->bss_conf.assoc)) { slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; slot1 = IWL_MIN_SLOT_TIME; } else if (!ctx_pan->vif->bss_conf.idle && !ctx_pan->vif->bss_conf.assoc) { slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; slot0 = IWL_MIN_SLOT_TIME; } } else if (ctx_pan->vif) { slot0 = 0; slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) * ctx_pan->beacon_int; slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); if (test_bit(STATUS_SCAN_HW, &priv->status)) { slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; slot1 = IWL_MIN_SLOT_TIME; } } cmd.slots[0].width = cpu_to_le16(slot0); cmd.slots[1].width = cpu_to_le16(slot1); ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); return ret; } static void _iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf, struct iwl_rxon_context *ctx) { struct iwl_rxon_cmd *rxon = &ctx->staging; if (!ctx->ht.enabled) { rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK | RXON_FLG_HT_PROT_MSK); return; } /* FIXME: if the definition of ht.protection changed, the "translation" * will be needed for rxon->flags */ rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS); /* Set up channel bandwidth: * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ /* clear the HT channel mode before set the mode */ rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); if 
(iwl_is_ht40_tx_allowed(priv, ctx, NULL)) { /* pure ht40 */ if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; /* * Note: control channel is opposite of extension * channel */ switch (ctx->ht.extension_chan_offset) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; break; } } else { /* * Note: control channel is opposite of extension * channel */ switch (ctx->ht.extension_chan_offset) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; break; case IEEE80211_HT_PARAM_CHA_SEC_NONE: default: /* * channel location only valid if in Mixed * mode */ IWL_ERR(priv, "invalid extension channel offset\n"); break; } } } else { rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; } iwlagn_set_rxon_chain(priv, ctx); IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags), ctx->ht.protection, ctx->ht.extension_chan_offset); } void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) { struct iwl_rxon_context *ctx; for_each_context(priv, ctx) _iwl_set_rxon_ht(priv, ht_conf, ctx); } /** * iwl_set_rxon_channel - Set the band and channel values in staging RXON * @ch: requested channel as a pointer to struct ieee80211_channel * NOTE: Does not commit to the hardware; it sets appropriate bit fields * in the staging RXON flag structure based on the ch->band */ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, struct iwl_rxon_context *ctx) { enum ieee80211_band band = ch->band; u16 channel = ch->hw_value; if ((le16_to_cpu(ctx->staging.channel) == channel) && (priv->band == 
band)) return; ctx->staging.channel = cpu_to_le16(channel); if (band == IEEE80211_BAND_5GHZ) ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; else ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; priv->band = band; IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); } void iwl_set_flags_for_band(struct iwl_priv *priv, struct iwl_rxon_context *ctx, enum ieee80211_band band, struct ieee80211_vif *vif) { if (band == IEEE80211_BAND_5GHZ) { ctx->staging.flags &= ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_CCK_MSK); ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; } else { /* Copied from iwl_post_associate() */ if (vif && vif->bss_conf.use_short_slot) ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; else ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; ctx->staging.flags &= ~RXON_FLG_CCK_MSK; } } static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, int hw_decrypt) { struct iwl_rxon_cmd *rxon = &ctx->staging; if (hw_decrypt) rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; else rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; } /* validate RXON structure is valid */ static int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { struct iwl_rxon_cmd *rxon = &ctx->staging; u32 errors = 0; if (rxon->flags & RXON_FLG_BAND_24G_MSK) { if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { IWL_WARN(priv, "check 2.4G: wrong narrow\n"); errors |= BIT(0); } if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { IWL_WARN(priv, "check 2.4G: wrong radar\n"); errors |= BIT(1); } } else { if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { IWL_WARN(priv, "check 5.2G: not short slot!\n"); errors |= BIT(2); } if (rxon->flags & RXON_FLG_CCK_MSK) { IWL_WARN(priv, "check 5.2G: CCK!\n"); errors |= BIT(3); } } if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { IWL_WARN(priv, "mac/bssid mcast!\n"); errors |= BIT(4); } /* make 
sure basic rates 6Mbps and 1Mbps are supported */ if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { IWL_WARN(priv, "neither 1 nor 6 are basic\n"); errors |= BIT(5); } if (le16_to_cpu(rxon->assoc_id) > 2007) { IWL_WARN(priv, "aid > 2007\n"); errors |= BIT(6); } if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { IWL_WARN(priv, "CCK and short slot\n"); errors |= BIT(7); } if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { IWL_WARN(priv, "CCK and auto detect\n"); errors |= BIT(8); } if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK) { IWL_WARN(priv, "TGg but no auto-detect\n"); errors |= BIT(9); } if (rxon->channel == 0) { IWL_WARN(priv, "zero channel is invalid\n"); errors |= BIT(10); } WARN(errors, "Invalid RXON (%#x), channel %d", errors, le16_to_cpu(rxon->channel)); return errors ? -EINVAL : 0; } /** * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed * @priv: staging_rxon is compared to active_rxon * * If the RXON structure is changing enough to require a new tune, * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 
*/ static int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { const struct iwl_rxon_cmd *staging = &ctx->staging; const struct iwl_rxon_cmd *active = &ctx->active; #define CHK(cond) \ if ((cond)) { \ IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ return 1; \ } #define CHK_NEQ(c1, c2) \ if ((c1) != (c2)) { \ IWL_DEBUG_INFO(priv, "need full RXON - " \ #c1 " != " #c2 " - %d != %d\n", \ (c1), (c2)); \ return 1; \ } /* These items are only settable from the full RXON command */ CHK(!iwl_is_associated_ctx(ctx)); CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr)); CHK(!ether_addr_equal(staging->node_addr, active->node_addr)); CHK(!ether_addr_equal(staging->wlap_bssid_addr, active->wlap_bssid_addr)); CHK_NEQ(staging->dev_type, active->dev_type); CHK_NEQ(staging->channel, active->channel); CHK_NEQ(staging->air_propagation, active->air_propagation); CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, active->ofdm_ht_single_stream_basic_rates); CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, active->ofdm_ht_dual_stream_basic_rates); CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates, active->ofdm_ht_triple_stream_basic_rates); CHK_NEQ(staging->assoc_id, active->assoc_id); /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can * be updated with the RXON_ASSOC command -- however only some * flag transitions are allowed using RXON_ASSOC */ /* Check if we are not switching bands */ CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, active->flags & RXON_FLG_BAND_24G_MSK); /* Check if we are switching association toggle */ CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, active->filter_flags & RXON_FILTER_ASSOC_MSK); #undef CHK #undef CHK_NEQ return 0; } #ifdef CONFIG_IWLWIFI_DEBUG void iwl_print_rx_config_cmd(struct iwl_priv *priv, enum iwl_rxon_context_id ctxid) { struct iwl_rxon_context *ctx = &priv->contexts[ctxid]; struct iwl_rxon_cmd *rxon = &ctx->staging; IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); 
iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags)); IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates); IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); } #endif static void iwl_calc_basic_rates(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { int lowest_present_ofdm = 100; int lowest_present_cck = 100; u8 cck = 0; u8 ofdm = 0; if (ctx->vif) { struct ieee80211_supported_band *sband; unsigned long basic = ctx->vif->bss_conf.basic_rates; int i; sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band]; for_each_set_bit(i, &basic, BITS_PER_LONG) { int hw = sband->bitrates[i].hw_value; if (hw >= IWL_FIRST_OFDM_RATE) { ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); if (lowest_present_ofdm > hw) lowest_present_ofdm = hw; } else { BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); cck |= BIT(hw); if (lowest_present_cck > hw) lowest_present_cck = hw; } } } /* * Now we've got the basic rates as bitmaps in the ofdm and cck * variables. This isn't sufficient though, as there might not * be all the right rates in the bitmap. E.g. if the only basic * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps * and 6 Mbps because the 802.11-2007 standard says in 9.6: * * [...] a STA responding to a received frame shall transmit * its Control Response frame [...] 
at the highest rate in the * BSSBasicRateSet parameter that is less than or equal to the * rate of the immediately previous frame in the frame exchange * sequence ([...]) and that is of the same modulation class * ([...]) as the received frame. If no rate contained in the * BSSBasicRateSet parameter meets these conditions, then the * control frame sent in response to a received frame shall be * transmitted at the highest mandatory rate of the PHY that is * less than or equal to the rate of the received frame, and * that is of the same modulation class as the received frame. * * As a consequence, we need to add all mandatory rates that are * lower than all of the basic rates to these bitmaps. */ if (IWL_RATE_24M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE; if (IWL_RATE_12M_INDEX < lowest_present_ofdm) ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE; /* 6M already there or needed so always add */ ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE; /* * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. * Note, however: * - if no CCK rates are basic, it must be ERP since there must * be some basic rates at all, so they're OFDM => ERP PHY * (or we're in 5 GHz, and the cck bitmap will never be used) * - if 11M is a basic rate, it must be ERP as well, so add 5.5M * - if 5.5M is basic, 1M and 2M are mandatory * - if 2M is basic, 1M is mandatory * - if 1M is basic, that's the only valid ACK rate. * As a consequence, it's not as complicated as it sounds, just add * any lower rates to the ACK rate bitmap. 
*/ if (IWL_RATE_11M_INDEX < lowest_present_cck) cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; if (IWL_RATE_5M_INDEX < lowest_present_cck) cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; if (IWL_RATE_2M_INDEX < lowest_present_cck) cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; /* 1M already there or needed so always add */ cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE; IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n", cck, ofdm); /* "basic_rates" is a misnomer here -- should be called ACK rates */ ctx->staging.cck_basic_rates = cck; ctx->staging.ofdm_basic_rates = ofdm; } /** * iwlagn_commit_rxon - commit staging_rxon to hardware * * The RXON command in staging_rxon is committed to the hardware and * the active_rxon structure is updated with the new data. This * function correctly transitions out of the RXON_ASSOC_MSK state if * a HW tune is required based on the RXON structure changes. * * The connect/disconnect flow should be as the following: * * 1. make sure send RXON command with association bit unset if not connect * this should include the channel and the band for the candidate * to be connected to * 2. Add Station before RXON association with the AP * 3. RXON_timing has to send before RXON for connection * 4. full RXON command - associated bit set * 5. 
use RXON_ASSOC command to update any flags changes */ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { /* cast away the const for active_rxon in this function */ struct iwl_rxon_cmd *active = (void *)&ctx->active; bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); int ret; lockdep_assert_held(&priv->mutex); if (!iwl_is_alive(priv)) return -EBUSY; /* This function hardcodes a bunch of dual-mode assumptions */ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); if (!ctx->is_active) return 0; /* always get timestamp with Rx frame */ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; /* recalculate basic rates */ iwl_calc_basic_rates(priv, ctx); /* * force CTS-to-self frames protection if RTS-CTS is not preferred * one aggregation protection method */ if (!priv->hw_params.use_rts_for_aggregation) ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; else ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; iwl_print_rx_config_cmd(priv, ctx->ctxid); ret = iwl_check_rxon_cmd(priv, ctx); if (ret) { IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); return -EINVAL; } /* * receive commit_rxon request * abort any previous channel switch if still in process */ if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) && (priv->switch_channel != ctx->staging.channel)) { IWL_DEBUG_11H(priv, "abort channel switch on %d\n", le16_to_cpu(priv->switch_channel)); iwl_chswitch_done(priv, false); } /* * If we don't need to send a full RXON, we can use * iwl_rxon_assoc_cmd which is used to reconfigure filter * and other flags for the current radio configuration. 
*/ if (!iwl_full_rxon_required(priv, ctx)) { ret = iwlagn_send_rxon_assoc(priv, ctx); if (ret) { IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); return ret; } memcpy(active, &ctx->staging, sizeof(*active)); /* * We do not commit tx power settings while channel changing, * do it now if after settings changed. */ iwl_set_tx_power(priv, priv->tx_power_next, false); /* make sure we are in the right PS state */ iwl_power_update_mode(priv, true); return 0; } iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto); IWL_DEBUG_INFO(priv, "Going to commit RXON\n" " * with%s RXON_FILTER_ASSOC_MSK\n" " * channel = %d\n" " * bssid = %pM\n", (new_assoc ? "" : "out"), le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr); /* * Always clear associated first, but with the correct config. * This is required as for example station addition for the * AP station must be done after the BSSID is set to correctly * set up filters in the device. */ ret = iwlagn_rxon_disconn(priv, ctx); if (ret) return ret; ret = iwlagn_set_pan_params(priv); if (ret) return ret; if (new_assoc) return iwlagn_rxon_connect(priv, ctx); return 0; } void iwlagn_config_ht40(struct ieee80211_conf *conf, struct iwl_rxon_context *ctx) { if (conf_is_ht40_minus(conf)) { ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW; ctx->ht.is_40mhz = true; } else if (conf_is_ht40_plus(conf)) { ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; ctx->ht.is_40mhz = true; } else { ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; ctx->ht.is_40mhz = false; } } int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx; struct ieee80211_conf *conf = &hw->conf; struct ieee80211_channel *channel = conf->chandef.chan; int ret = 0; IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed); mutex_lock(&priv->mutex); if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { 
IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); goto out; } if (!iwl_is_ready(priv)) { IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); goto out; } if (changed & (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) { /* mac80211 uses static for non-HT which is what we want */ priv->current_ht_config.smps = conf->smps_mode; /* * Recalculate chain counts. * * If monitor mode is enabled then mac80211 will * set up the SM PS mode to OFF if an HT channel is * configured. */ for_each_context(priv, ctx) iwlagn_set_rxon_chain(priv, ctx); } if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { for_each_context(priv, ctx) { /* Configure HT40 channels */ if (ctx->ht.enabled != conf_is_ht(conf)) ctx->ht.enabled = conf_is_ht(conf); if (ctx->ht.enabled) { /* if HT40 is used, it should not change * after associated except channel switch */ if (!ctx->ht.is_40mhz || !iwl_is_associated_ctx(ctx)) iwlagn_config_ht40(conf, ctx); } else ctx->ht.is_40mhz = false; /* * Default to no protection. Protection mode will * later be set from BSS config in iwl_ht_conf */ ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; /* if we are switching from ht to 2.4 clear flags * from any ht related info since 2.4 does not * support ht */ if (le16_to_cpu(ctx->staging.channel) != channel->hw_value) ctx->staging.flags = 0; iwl_set_rxon_channel(priv, channel, ctx); iwl_set_rxon_ht(priv, &priv->current_ht_config); iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); } iwl_update_bcast_stations(priv); } if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) { ret = iwl_power_update_mode(priv, false); if (ret) IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); } if (changed & IEEE80211_CONF_CHANGE_POWER) { IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", priv->tx_power_user_lmt, conf->power_level); iwl_set_tx_power(priv, conf->power_level, false); } for_each_context(priv, ctx) { if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) continue; 
iwlagn_commit_rxon(priv, ctx); } out: mutex_unlock(&priv->mutex); IWL_DEBUG_MAC80211(priv, "leave\n"); return ret; } static void iwlagn_check_needed_chains(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_bss_conf *bss_conf) { struct ieee80211_vif *vif = ctx->vif; struct iwl_rxon_context *tmp; struct ieee80211_sta *sta; struct iwl_ht_config *ht_conf = &priv->current_ht_config; struct ieee80211_sta_ht_cap *ht_cap; bool need_multiple; lockdep_assert_held(&priv->mutex); switch (vif->type) { case NL80211_IFTYPE_STATION: rcu_read_lock(); sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!sta) { /* * If at all, this can only happen through a race * when the AP disconnects us while we're still * setting up the connection, in that case mac80211 * will soon tell us about that. */ need_multiple = false; rcu_read_unlock(); break; } ht_cap = &sta->ht_cap; need_multiple = true; /* * If the peer advertises no support for receiving 2 and 3 * stream MCS rates, it can't be transmitting them either. */ if (ht_cap->mcs.rx_mask[1] == 0 && ht_cap->mcs.rx_mask[2] == 0) { need_multiple = false; } else if (!(ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_DEFINED)) { /* If it can't TX MCS at all ... */ need_multiple = false; } else if (ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_RX_DIFF) { int maxstreams; /* * But if it can receive them, it might still not * be able to transmit them, which is what we need * to check here -- so check the number of streams * it advertises for TX (if different from RX). 
*/ maxstreams = (ht_cap->mcs.tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK); maxstreams >>= IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; maxstreams += 1; if (maxstreams <= 1) need_multiple = false; } rcu_read_unlock(); break; case NL80211_IFTYPE_ADHOC: /* currently */ need_multiple = false; break; default: /* only AP really */ need_multiple = true; break; } ctx->ht_need_multiple_chains = need_multiple; if (!need_multiple) { /* check all contexts */ for_each_context(priv, tmp) { if (!tmp->vif) continue; if (tmp->ht_need_multiple_chains) { need_multiple = true; break; } } } ht_conf->single_chain_sufficient = !need_multiple; } static void iwlagn_chain_noise_reset(struct iwl_priv *priv) { struct iwl_chain_noise_data *data = &priv->chain_noise_data; int ret; if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED) return; if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_any_associated(priv)) { struct iwl_calib_chain_noise_reset_cmd cmd; /* clear data for chain noise calibration algorithm */ data->chain_noise_a = 0; data->chain_noise_b = 0; data->chain_noise_c = 0; data->chain_signal_a = 0; data->chain_signal_b = 0; data->chain_signal_c = 0; data->beacon_count = 0; memset(&cmd, 0, sizeof(cmd)); iwl_set_calib_hdr(&cmd.hdr, priv->phy_calib_chain_noise_reset_cmd); ret = iwl_dvm_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(priv, "Could not send REPLY_PHY_CALIBRATION_CMD\n"); data->state = IWL_CHAIN_NOISE_ACCUMULATE; IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n"); } } void iwlagn_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); int ret; bool force = false; mutex_lock(&priv->mutex); if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { /* * If we go idle, then clearly no "passive-no-rx" * workaround is needed any more, this is a reset. 
*/ iwlagn_lift_passive_no_rx(priv); } if (unlikely(!iwl_is_ready(priv))) { IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); mutex_unlock(&priv->mutex); return; } if (unlikely(!ctx->vif)) { IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n"); mutex_unlock(&priv->mutex); return; } if (changes & BSS_CHANGED_BEACON_INT) force = true; if (changes & BSS_CHANGED_QOS) { ctx->qos_data.qos_active = bss_conf->qos; iwlagn_update_qos(priv, ctx); } ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); if (vif->bss_conf.use_short_preamble) ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; else ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { priv->timestamp = bss_conf->sync_tsf; ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; } else { ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; if (ctx->ctxid == IWL_RXON_CTX_BSS) priv->have_rekey_data = false; } iwlagn_bt_coex_rssi_monitor(priv); } if (ctx->ht.enabled) { ctx->ht.protection = bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); iwlagn_check_needed_chains(priv, ctx, bss_conf); iwl_set_rxon_ht(priv, &priv->current_ht_config); } iwlagn_set_rxon_chain(priv, ctx); if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; else ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; if (bss_conf->use_cts_prot) ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; else ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { if (vif->bss_conf.enable_beacon) { ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; priv->beacon_ctx = ctx; } else { ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; priv->beacon_ctx = NULL; } } /* * If the ucode decides to do beacon filtering before * association, it 
will lose beacons that are needed * before sending frames out on passive channels. This * causes association failures on those channels. Enable * receiving beacons in such cases. */ if (vif->type == NL80211_IFTYPE_STATION) { if (!bss_conf->assoc) ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK; else ctx->staging.filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK; } if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) iwlagn_commit_rxon(priv, ctx); if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { /* * The chain noise calibration will enable PM upon * completion. If calibration has already been run * then we need to enable power management here. */ if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) iwl_power_update_mode(priv, false); /* Enable RX differential gain and sensitivity calibrations */ iwlagn_chain_noise_reset(priv); priv->start_calib = 1; } if (changes & BSS_CHANGED_IBSS) { ret = iwlagn_manage_ibss_station(priv, vif, bss_conf->ibss_joined); if (ret) IWL_ERR(priv, "failed to %s IBSS station %pM\n", bss_conf->ibss_joined ? "add" : "remove", bss_conf->bssid); } if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) { if (iwlagn_update_beacon(priv, vif)) IWL_ERR(priv, "Error updating beacon\n"); } mutex_unlock(&priv->mutex); } void iwlagn_post_scan(struct iwl_priv *priv) { struct iwl_rxon_context *ctx; /* * We do not commit power settings while scan is pending, * do it now if the settings changed. */ iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false); iwl_set_tx_power(priv, priv->tx_power_next, false); /* * Since setting the RXON may have been deferred while * performing the scan, fire one off if needed */ for_each_context(priv, ctx) if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) iwlagn_commit_rxon(priv, ctx); iwlagn_set_pan_params(priv); }
gpl-2.0
alquez/HTC-Wildfire-S-Kernel
drivers/staging/ramzswap/xvmalloc.c
1043
12768
/* * xvmalloc memory allocator * * Copyright (C) 2008, 2009, 2010 Nitin Gupta * * This code is released using a dual license strategy: BSD/GPL * You can choose the licence that better fits your requirements. * * Released under the terms of 3-clause BSD License * Released under the terms of GNU General Public License Version 2.0 */ #include <linux/bitops.h> #include <linux/errno.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include "xvmalloc.h" #include "xvmalloc_int.h" static void stat_inc(u64 *value) { *value = *value + 1; } static void stat_dec(u64 *value) { *value = *value - 1; } static int test_flag(struct block_header *block, enum blockflags flag) { return block->prev & BIT(flag); } static void set_flag(struct block_header *block, enum blockflags flag) { block->prev |= BIT(flag); } static void clear_flag(struct block_header *block, enum blockflags flag) { block->prev &= ~BIT(flag); } /* * Given <page, offset> pair, provide a derefrencable pointer. * This is called from xv_malloc/xv_free path, so it * needs to be fast. */ static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type) { unsigned char *base; base = kmap_atomic(page, type); return base + offset; } static void put_ptr_atomic(void *ptr, enum km_type type) { kunmap_atomic(ptr, type); } static u32 get_blockprev(struct block_header *block) { return block->prev & PREV_MASK; } static void set_blockprev(struct block_header *block, u16 new_offset) { block->prev = new_offset | (block->prev & FLAGS_MASK); } static struct block_header *BLOCK_NEXT(struct block_header *block) { return (struct block_header *) ((char *)block + block->size + XV_ALIGN); } /* * Get index of free list containing blocks of maximum size * which is less than or equal to given size. 
*/ static u32 get_index_for_insert(u32 size) { if (unlikely(size > XV_MAX_ALLOC_SIZE)) size = XV_MAX_ALLOC_SIZE; size &= ~FL_DELTA_MASK; return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT; } /* * Get index of free list having blocks of size greater than * or equal to requested size. */ static u32 get_index(u32 size) { if (unlikely(size < XV_MIN_ALLOC_SIZE)) size = XV_MIN_ALLOC_SIZE; size = ALIGN(size, FL_DELTA); return (size - XV_MIN_ALLOC_SIZE) >> FL_DELTA_SHIFT; } /** * find_block - find block of at least given size * @pool: memory pool to search from * @size: size of block required * @page: page containing required block * @offset: offset within the page where block is located. * * Searches two level bitmap to locate block of at least * the given size. If such a block is found, it provides * <page, offset> to identify this block and returns index * in freelist where we found this block. * Otherwise, returns 0 and <page, offset> params are not touched. */ static u32 find_block(struct xv_pool *pool, u32 size, struct page **page, u32 *offset) { ulong flbitmap, slbitmap; u32 flindex, slindex, slbitstart; /* There are no free blocks in this pool */ if (!pool->flbitmap) return 0; /* Get freelist index correspoding to this size */ slindex = get_index(size); slbitmap = pool->slbitmap[slindex / BITS_PER_LONG]; slbitstart = slindex % BITS_PER_LONG; /* * If freelist is not empty at this index, we found the * block - head of this list. This is approximate best-fit match. */ if (test_bit(slbitstart, &slbitmap)) { *page = pool->freelist[slindex].page; *offset = pool->freelist[slindex].offset; return slindex; } /* * No best-fit found. Search a bit further in bitmap for a free block. * Second level bitmap consists of series of 32-bit chunks. Search * further in the chunk where we expected a best-fit, starting from * index location found above. 
*/ slbitstart++; slbitmap >>= slbitstart; /* Skip this search if we were already at end of this bitmap chunk */ if ((slbitstart != BITS_PER_LONG) && slbitmap) { slindex += __ffs(slbitmap) + 1; *page = pool->freelist[slindex].page; *offset = pool->freelist[slindex].offset; return slindex; } /* Now do a full two-level bitmap search to find next nearest fit */ flindex = slindex / BITS_PER_LONG; flbitmap = (pool->flbitmap) >> (flindex + 1); if (!flbitmap) return 0; flindex += __ffs(flbitmap) + 1; slbitmap = pool->slbitmap[flindex]; slindex = (flindex * BITS_PER_LONG) + __ffs(slbitmap); *page = pool->freelist[slindex].page; *offset = pool->freelist[slindex].offset; return slindex; } /* * Insert block at <page, offset> in freelist of given pool. * freelist used depends on block size. */ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset, struct block_header *block) { u32 flindex, slindex; struct block_header *nextblock; slindex = get_index_for_insert(block->size); flindex = slindex / BITS_PER_LONG; block->link.prev_page = 0; block->link.prev_offset = 0; block->link.next_page = pool->freelist[slindex].page; block->link.next_offset = pool->freelist[slindex].offset; pool->freelist[slindex].page = page; pool->freelist[slindex].offset = offset; if (block->link.next_page) { nextblock = get_ptr_atomic(block->link.next_page, block->link.next_offset, KM_USER1); nextblock->link.prev_page = page; nextblock->link.prev_offset = offset; put_ptr_atomic(nextblock, KM_USER1); } __set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]); __set_bit(flindex, &pool->flbitmap); } /* * Remove block from head of freelist. Index 'slindex' identifies the freelist. 
*/ static void remove_block_head(struct xv_pool *pool, struct block_header *block, u32 slindex) { struct block_header *tmpblock; u32 flindex = slindex / BITS_PER_LONG; pool->freelist[slindex].page = block->link.next_page; pool->freelist[slindex].offset = block->link.next_offset; block->link.prev_page = 0; block->link.prev_offset = 0; if (!pool->freelist[slindex].page) { __clear_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]); if (!pool->slbitmap[flindex]) __clear_bit(flindex, &pool->flbitmap); } else { /* * DEBUG ONLY: We need not reinitialize freelist head previous * pointer to 0 - we never depend on its value. But just for * sanity, lets do it. */ tmpblock = get_ptr_atomic(pool->freelist[slindex].page, pool->freelist[slindex].offset, KM_USER1); tmpblock->link.prev_page = 0; tmpblock->link.prev_offset = 0; put_ptr_atomic(tmpblock, KM_USER1); } } /* * Remove block from freelist. Index 'slindex' identifies the freelist. */ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset, struct block_header *block, u32 slindex) { u32 flindex; struct block_header *tmpblock; if (pool->freelist[slindex].page == page && pool->freelist[slindex].offset == offset) { remove_block_head(pool, block, slindex); return; } flindex = slindex / BITS_PER_LONG; if (block->link.prev_page) { tmpblock = get_ptr_atomic(block->link.prev_page, block->link.prev_offset, KM_USER1); tmpblock->link.next_page = block->link.next_page; tmpblock->link.next_offset = block->link.next_offset; put_ptr_atomic(tmpblock, KM_USER1); } if (block->link.next_page) { tmpblock = get_ptr_atomic(block->link.next_page, block->link.next_offset, KM_USER1); tmpblock->link.prev_page = block->link.prev_page; tmpblock->link.prev_offset = block->link.prev_offset; put_ptr_atomic(tmpblock, KM_USER1); } } /* * Allocate a page and add it to freelist of given pool. 
*/ static int grow_pool(struct xv_pool *pool, gfp_t flags) { struct page *page; struct block_header *block; page = alloc_page(flags); if (unlikely(!page)) return -ENOMEM; stat_inc(&pool->total_pages); spin_lock(&pool->lock); block = get_ptr_atomic(page, 0, KM_USER0); block->size = PAGE_SIZE - XV_ALIGN; set_flag(block, BLOCK_FREE); clear_flag(block, PREV_FREE); set_blockprev(block, 0); insert_block(pool, page, 0, block); put_ptr_atomic(block, KM_USER0); spin_unlock(&pool->lock); return 0; } /* * Create a memory pool. Allocates freelist, bitmaps and other * per-pool metadata. */ struct xv_pool *xv_create_pool(void) { u32 ovhd_size; struct xv_pool *pool; ovhd_size = roundup(sizeof(*pool), PAGE_SIZE); pool = kzalloc(ovhd_size, GFP_KERNEL); if (!pool) return NULL; spin_lock_init(&pool->lock); return pool; } void xv_destroy_pool(struct xv_pool *pool) { kfree(pool); } /** * xv_malloc - Allocate block of given size from pool. * @pool: pool to allocate from * @size: size of block to allocate * @page: page no. that holds the object * @offset: location of object within page * * On success, <page, offset> identifies block allocated * and 0 is returned. On failure, <page, offset> is set to * 0 and -ENOMEM is returned. * * Allocation requests with size > XV_MAX_ALLOC_SIZE will fail. 
*/ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page, u32 *offset, gfp_t flags) { int error; u32 index, tmpsize, origsize, tmpoffset; struct block_header *block, *tmpblock; *page = NULL; *offset = 0; origsize = size; if (unlikely(!size || size > XV_MAX_ALLOC_SIZE)) return -ENOMEM; size = ALIGN(size, XV_ALIGN); spin_lock(&pool->lock); index = find_block(pool, size, page, offset); if (!*page) { spin_unlock(&pool->lock); if (flags & GFP_NOWAIT) return -ENOMEM; error = grow_pool(pool, flags); if (unlikely(error)) return error; spin_lock(&pool->lock); index = find_block(pool, size, page, offset); } if (!*page) { spin_unlock(&pool->lock); return -ENOMEM; } block = get_ptr_atomic(*page, *offset, KM_USER0); remove_block_head(pool, block, index); /* Split the block if required */ tmpoffset = *offset + size + XV_ALIGN; tmpsize = block->size - size; tmpblock = (struct block_header *)((char *)block + size + XV_ALIGN); if (tmpsize) { tmpblock->size = tmpsize - XV_ALIGN; set_flag(tmpblock, BLOCK_FREE); clear_flag(tmpblock, PREV_FREE); set_blockprev(tmpblock, *offset); if (tmpblock->size >= XV_MIN_ALLOC_SIZE) insert_block(pool, *page, tmpoffset, tmpblock); if (tmpoffset + XV_ALIGN + tmpblock->size != PAGE_SIZE) { tmpblock = BLOCK_NEXT(tmpblock); set_blockprev(tmpblock, tmpoffset); } } else { /* This block is exact fit */ if (tmpoffset != PAGE_SIZE) clear_flag(tmpblock, PREV_FREE); } block->size = origsize; clear_flag(block, BLOCK_FREE); put_ptr_atomic(block, KM_USER0); spin_unlock(&pool->lock); *offset += XV_ALIGN; return 0; } /* * Free block identified with <page, offset> */ void xv_free(struct xv_pool *pool, struct page *page, u32 offset) { void *page_start; struct block_header *block, *tmpblock; offset -= XV_ALIGN; spin_lock(&pool->lock); page_start = get_ptr_atomic(page, 0, KM_USER0); block = (struct block_header *)((char *)page_start + offset); /* Catch double free bugs */ BUG_ON(test_flag(block, BLOCK_FREE)); block->size = ALIGN(block->size, XV_ALIGN); 
tmpblock = BLOCK_NEXT(block); if (offset + block->size + XV_ALIGN == PAGE_SIZE) tmpblock = NULL; /* Merge next block if its free */ if (tmpblock && test_flag(tmpblock, BLOCK_FREE)) { /* * Blocks smaller than XV_MIN_ALLOC_SIZE * are not inserted in any free list. */ if (tmpblock->size >= XV_MIN_ALLOC_SIZE) { remove_block(pool, page, offset + block->size + XV_ALIGN, tmpblock, get_index_for_insert(tmpblock->size)); } block->size += tmpblock->size + XV_ALIGN; } /* Merge previous block if its free */ if (test_flag(block, PREV_FREE)) { tmpblock = (struct block_header *)((char *)(page_start) + get_blockprev(block)); offset = offset - tmpblock->size - XV_ALIGN; if (tmpblock->size >= XV_MIN_ALLOC_SIZE) remove_block(pool, page, offset, tmpblock, get_index_for_insert(tmpblock->size)); tmpblock->size += block->size + XV_ALIGN; block = tmpblock; } /* No used objects in this page. Free it. */ if (block->size == PAGE_SIZE - XV_ALIGN) { put_ptr_atomic(page_start, KM_USER0); spin_unlock(&pool->lock); __free_page(page); stat_dec(&pool->total_pages); return; } set_flag(block, BLOCK_FREE); if (block->size >= XV_MIN_ALLOC_SIZE) insert_block(pool, page, offset, block); if (offset + block->size + XV_ALIGN != PAGE_SIZE) { tmpblock = BLOCK_NEXT(block); set_flag(tmpblock, PREV_FREE); set_blockprev(tmpblock, offset); } put_ptr_atomic(page_start, KM_USER0); spin_unlock(&pool->lock); } u32 xv_get_object_size(void *obj) { struct block_header *blk; blk = (struct block_header *)((char *)(obj) - XV_ALIGN); return blk->size; } /* * Returns total memory used by allocator (userdata + metadata) */ u64 xv_get_total_size_bytes(struct xv_pool *pool) { return pool->total_pages << PAGE_SHIFT; }
gpl-2.0
cuboxi/android_kernel_imx6_cuboxi
drivers/media/platform/sh_veu.c
2067
32946
/* * sh-mobile VEU mem2mem driver * * Copyright (C) 2012 Renesas Electronics Corporation * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de> * Copyright (C) 2008 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the version 2 of the GNU General Public License as * published by the Free Software Foundation */ #include <linux/err.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/videodev2.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include <media/videobuf2-dma-contig.h> #define VEU_STR 0x00 /* start register */ #define VEU_SWR 0x10 /* src: line length */ #define VEU_SSR 0x14 /* src: image size */ #define VEU_SAYR 0x18 /* src: y/rgb plane address */ #define VEU_SACR 0x1c /* src: c plane address */ #define VEU_BSSR 0x20 /* bundle mode register */ #define VEU_EDWR 0x30 /* dst: line length */ #define VEU_DAYR 0x34 /* dst: y/rgb plane address */ #define VEU_DACR 0x38 /* dst: c plane address */ #define VEU_TRCR 0x50 /* transform control */ #define VEU_RFCR 0x54 /* resize scale */ #define VEU_RFSR 0x58 /* resize clip */ #define VEU_ENHR 0x5c /* enhance */ #define VEU_FMCR 0x70 /* filter mode */ #define VEU_VTCR 0x74 /* lowpass vertical */ #define VEU_HTCR 0x78 /* lowpass horizontal */ #define VEU_APCR 0x80 /* color match */ #define VEU_ECCR 0x84 /* color replace */ #define VEU_AFXR 0x90 /* fixed mode */ #define VEU_SWPR 0x94 /* swap */ #define VEU_EIER 0xa0 /* interrupt mask */ #define VEU_EVTR 0xa4 /* interrupt event */ #define VEU_STAR 0xb0 /* status */ #define VEU_BSRR 0xb4 /* reset */ #define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */ #define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */ 
#define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */ #define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */ #define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */ #define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */ #define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */ #define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */ #define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */ #define VEU_COFFR 0x224 /* color conversion offset */ #define VEU_CBR 0x228 /* color conversion clip */ /* * 4092x4092 max size is the normal case. In some cases it can be reduced to * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188. */ #define MAX_W 4092 #define MAX_H 4092 #define MIN_W 8 #define MIN_H 8 #define ALIGN_W 4 /* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */ #define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024) #define MEM2MEM_DEF_TRANSLEN 1 struct sh_veu_dev; struct sh_veu_file { struct sh_veu_dev *veu_dev; bool cfg_needed; }; struct sh_veu_format { char *name; u32 fourcc; unsigned int depth; unsigned int ydepth; }; /* video data format */ struct sh_veu_vfmt { /* Replace with v4l2_rect */ struct v4l2_rect frame; unsigned int bytesperline; unsigned int offset_y; unsigned int offset_c; const struct sh_veu_format *fmt; }; struct sh_veu_dev { struct v4l2_device v4l2_dev; struct video_device vdev; struct v4l2_m2m_dev *m2m_dev; struct device *dev; struct v4l2_m2m_ctx *m2m_ctx; struct sh_veu_vfmt vfmt_out; struct sh_veu_vfmt vfmt_in; /* Only single user per direction so far */ struct sh_veu_file *capture; struct sh_veu_file *output; struct mutex fop_lock; void __iomem *base; struct vb2_alloc_ctx *alloc_ctx; spinlock_t lock; bool is_2h; unsigned int xaction; bool aborting; }; enum sh_veu_fmt_idx { SH_VEU_FMT_NV12, SH_VEU_FMT_NV16, SH_VEU_FMT_NV24, SH_VEU_FMT_RGB332, SH_VEU_FMT_RGB444, SH_VEU_FMT_RGB565, SH_VEU_FMT_RGB666, SH_VEU_FMT_RGB24, }; #define 
VGA_WIDTH 640 #define VGA_HEIGHT 480 #define DEFAULT_IN_WIDTH VGA_WIDTH #define DEFAULT_IN_HEIGHT VGA_HEIGHT #define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12 #define DEFAULT_OUT_WIDTH VGA_WIDTH #define DEFAULT_OUT_HEIGHT VGA_HEIGHT #define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565 /* * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte * aligned for NV24. */ static const struct sh_veu_format sh_veu_fmt[] = { [SH_VEU_FMT_NV12] = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 }, [SH_VEU_FMT_NV16] = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 }, [SH_VEU_FMT_NV24] = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 }, [SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 }, [SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 }, [SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 }, [SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 }, [SH_VEU_FMT_RGB24] = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 }, }; #define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \ .frame = { \ .width = VGA_WIDTH, \ .height = VGA_HEIGHT, \ }, \ .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \ .fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \ } #define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \ .frame = { \ .width = VGA_WIDTH, \ .height = VGA_HEIGHT, \ }, \ .bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \ .fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \ } /* * TODO: add support for further output formats: * SH_VEU_FMT_NV12, * SH_VEU_FMT_NV16, * SH_VEU_FMT_NV24, * SH_VEU_FMT_RGB332, * SH_VEU_FMT_RGB444, * SH_VEU_FMT_RGB666, * SH_VEU_FMT_RGB24, */ static const int sh_veu_fmt_out[] = { SH_VEU_FMT_RGB565, }; /* * TODO: add support for further input formats: * 
SH_VEU_FMT_NV16, * SH_VEU_FMT_NV24, * SH_VEU_FMT_RGB565, * SH_VEU_FMT_RGB666, * SH_VEU_FMT_RGB24, */ static const int sh_veu_fmt_in[] = { SH_VEU_FMT_NV12, }; static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc) { switch (fourcc) { default: BUG(); case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV24: return V4L2_COLORSPACE_JPEG; case V4L2_PIX_FMT_RGB332: case V4L2_PIX_FMT_RGB444: case V4L2_PIX_FMT_RGB565: case V4L2_PIX_FMT_BGR666: case V4L2_PIX_FMT_RGB24: return V4L2_COLORSPACE_SRGB; } } static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg) { return ioread32(veu->base + reg); } static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg, u32 value) { iowrite32(value, veu->base + reg); } /* ========== mem2mem callbacks ========== */ static void sh_veu_job_abort(void *priv) { struct sh_veu_dev *veu = priv; /* Will cancel the transaction in the next interrupt handler */ veu->aborting = true; } static void sh_veu_lock(void *priv) { struct sh_veu_dev *veu = priv; mutex_lock(&veu->fop_lock); } static void sh_veu_unlock(void *priv) { struct sh_veu_dev *veu = priv; mutex_unlock(&veu->fop_lock); } static void sh_veu_process(struct sh_veu_dev *veu, struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf) { dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y); sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ? addr + veu->vfmt_out.offset_c : 0); dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__, (unsigned long)addr, veu->vfmt_out.offset_y, veu->vfmt_out.offset_c); addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y); sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ? 
addr + veu->vfmt_in.offset_c : 0); dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__, (unsigned long)addr, veu->vfmt_in.offset_y, veu->vfmt_in.offset_c); sh_veu_reg_write(veu, VEU_STR, 1); sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */ } /** * sh_veu_device_run() - prepares and starts the device * * This will be called by the framework when it decides to schedule a particular * instance. */ static void sh_veu_device_run(void *priv) { struct sh_veu_dev *veu = priv; struct vb2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx); if (src_buf && dst_buf) sh_veu_process(veu, src_buf, dst_buf); } /* ========== video ioctls ========== */ static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file, enum v4l2_buf_type type) { return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE && veu_file == veu->capture) || (type == V4L2_BUF_TYPE_VIDEO_OUTPUT && veu_file == veu->output); } static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq); /* * It is not unusual to have video nodes open()ed multiple times. While some * V4L2 operations are non-intrusive, like querying formats and various * parameters, others, like setting formats, starting and stopping streaming, * queuing and dequeuing buffers, directly affect hardware configuration and / * or execution. This function verifies availability of the requested interface * and, if available, reserves it for the requesting user. 
*/ static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file, enum v4l2_buf_type type) { struct sh_veu_file **stream; switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: stream = &veu->capture; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: stream = &veu->output; break; default: return -EINVAL; } if (*stream == veu_file) return 0; if (*stream) return -EBUSY; *stream = veu_file; return 0; } static int sh_veu_context_init(struct sh_veu_dev *veu) { if (veu->m2m_ctx) return 0; veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu, sh_veu_queue_init); if (IS_ERR(veu->m2m_ctx)) return PTR_ERR(veu->m2m_ctx); return 0; } static int sh_veu_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strlcpy(cap->driver, "sh-veu", sizeof(cap->driver)); strlcpy(cap->card, "sh-mobile VEU", sizeof(cap->card)); strlcpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info)); cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num) { if (f->index >= fmt_num) return -EINVAL; strlcpy(f->description, sh_veu_fmt[fmt[f->index]].name, sizeof(f->description)); f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc; return 0; } static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out)); } static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in)); } static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu, enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &veu->vfmt_out; case V4L2_BUF_TYPE_VIDEO_OUTPUT: return &veu->vfmt_in; default: return NULL; } } static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f) { struct v4l2_pix_format *pix = &f->fmt.pix; 
struct sh_veu_dev *veu = veu_file->veu_dev; struct sh_veu_vfmt *vfmt; vfmt = sh_veu_get_vfmt(veu, f->type); pix->width = vfmt->frame.width; pix->height = vfmt->frame.height; pix->field = V4L2_FIELD_NONE; pix->pixelformat = vfmt->fmt->fourcc; pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat); pix->bytesperline = vfmt->bytesperline; pix->sizeimage = vfmt->bytesperline * pix->height * vfmt->fmt->depth / vfmt->fmt->ydepth; pix->priv = 0; dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__, f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat); return 0; } static int sh_veu_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { return sh_veu_g_fmt(priv, f); } static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { return sh_veu_g_fmt(priv, f); } static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt) { struct v4l2_pix_format *pix = &f->fmt.pix; unsigned int y_bytes_used; /* * V4L2 specification suggests, that the driver should correct the * format struct if any of the dimensions is unsupported */ switch (pix->field) { default: case V4L2_FIELD_ANY: pix->field = V4L2_FIELD_NONE; /* fall through: continue handling V4L2_FIELD_NONE */ case V4L2_FIELD_NONE: break; } v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W, &pix->height, MIN_H, MAX_H, 0, 0); y_bytes_used = (pix->width * fmt->ydepth) >> 3; if (pix->bytesperline < y_bytes_used) pix->bytesperline = y_bytes_used; pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth; pix->pixelformat = fmt->fourcc; pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat); pix->priv = 0; pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage); return 0; } static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f) { const int *fmt; int i, n, dflt; pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field); switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: fmt = 
sh_veu_fmt_out; n = ARRAY_SIZE(sh_veu_fmt_out); dflt = DEFAULT_OUT_FMTIDX; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: default: fmt = sh_veu_fmt_in; n = ARRAY_SIZE(sh_veu_fmt_in); dflt = DEFAULT_IN_FMTIDX; break; } for (i = 0; i < n; i++) if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat) return &sh_veu_fmt[fmt[i]]; return &sh_veu_fmt[dflt]; } static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { const struct sh_veu_format *fmt; fmt = sh_veu_find_fmt(f); if (!fmt) /* wrong buffer type */ return -EINVAL; return sh_veu_try_fmt(f, fmt); } static int sh_veu_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { const struct sh_veu_format *fmt; fmt = sh_veu_find_fmt(f); if (!fmt) /* wrong buffer type */ return -EINVAL; return sh_veu_try_fmt(f, fmt); } static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt) { /* dst_left and dst_top validity will be verified in CROP / COMPOSE */ unsigned int left = vfmt->frame.left & ~0x03; unsigned int top = vfmt->frame.top; dma_addr_t offset = ((left * veu->vfmt_out.fmt->depth) >> 3) + top * veu->vfmt_out.bytesperline; unsigned int y_line; vfmt->offset_y = offset; switch (vfmt->fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV24: y_line = ALIGN(vfmt->frame.width, 16); vfmt->offset_c = offset + y_line * vfmt->frame.height; break; case V4L2_PIX_FMT_RGB332: case V4L2_PIX_FMT_RGB444: case V4L2_PIX_FMT_RGB565: case V4L2_PIX_FMT_BGR666: case V4L2_PIX_FMT_RGB24: vfmt->offset_c = 0; break; default: BUG(); } } static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f) { struct v4l2_pix_format *pix = &f->fmt.pix; struct sh_veu_dev *veu = veu_file->veu_dev; struct sh_veu_vfmt *vfmt; struct vb2_queue *vq; int ret = sh_veu_context_init(veu); if (ret < 0) return ret; vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type); if (!vq) return -EINVAL; if (vb2_is_busy(vq)) { v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", 
__func__); return -EBUSY; } vfmt = sh_veu_get_vfmt(veu, f->type); /* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */ vfmt->fmt = sh_veu_find_fmt(f); /* vfmt->fmt != NULL following the same argument as above */ vfmt->frame.width = pix->width; vfmt->frame.height = pix->height; vfmt->bytesperline = pix->bytesperline; sh_veu_colour_offset(veu, vfmt); /* * We could also verify and require configuration only if any parameters * actually have changed, but it is unlikely, that the user requests the * same configuration several times without closing the device. */ veu_file->cfg_needed = true; dev_dbg(veu->dev, "Setting format for type %d, wxh: %dx%d, fmt: %x\n", f->type, pix->width, pix->height, vfmt->fmt->fourcc); return 0; } static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int ret = sh_veu_try_fmt_vid_cap(file, priv, f); if (ret) return ret; return sh_veu_s_fmt(priv, f); } static int sh_veu_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { int ret = sh_veu_try_fmt_vid_out(file, priv, f); if (ret) return ret; return sh_veu_s_fmt(priv, f); } static int sh_veu_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct sh_veu_file *veu_file = priv; struct sh_veu_dev *veu = veu_file->veu_dev; int ret = sh_veu_context_init(veu); if (ret < 0) return ret; ret = sh_veu_stream_init(veu, veu_file, reqbufs->type); if (ret < 0) return ret; return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs); } static int sh_veu_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct sh_veu_file *veu_file = priv; if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type)) return -EBUSY; return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf); } static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct sh_veu_file *veu_file = priv; dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type); if 
(!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type)) return -EBUSY; return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf); } static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct sh_veu_file *veu_file = priv; dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type); if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type)) return -EBUSY; return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf); } static void sh_veu_calc_scale(struct sh_veu_dev *veu, int size_in, int size_out, int crop_out, u32 *mant, u32 *frac, u32 *rep) { u32 fixpoint; /* calculate FRAC and MANT */ *rep = *mant = *frac = 0; if (size_in == size_out) { if (crop_out != size_out) *mant = 1; /* needed for cropping */ return; } /* VEU2H special upscale */ if (veu->is_2h && size_out > size_in) { u32 fixpoint = (4096 * size_in) / size_out; *mant = fixpoint / 4096; *frac = (fixpoint - (*mant * 4096)) & ~0x07; switch (*frac) { case 0x800: *rep = 1; break; case 0x400: *rep = 3; break; case 0x200: *rep = 7; break; } if (*rep) return; } fixpoint = (4096 * (size_in - 1)) / (size_out + 1); *mant = fixpoint / 4096; *frac = fixpoint - (*mant * 4096); if (*frac & 0x07) { /* * FIXME: do we really have to round down twice in the * up-scaling case? 
*/ *frac &= ~0x07; if (size_out > size_in) *frac -= 8; /* round down if scaling up */ else *frac += 8; /* round up if scaling down */ } } static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu, int size_in, int size_out, int crop_out) { u32 mant, frac, value, rep; sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep); /* set scale */ value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) | (((mant << 12) | frac) << 16); sh_veu_reg_write(veu, VEU_RFCR, value); /* set clip */ value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) | (((rep << 12) | crop_out) << 16); sh_veu_reg_write(veu, VEU_RFSR, value); return ALIGN((size_in * crop_out) / size_out, 4); } static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu, int size_in, int size_out, int crop_out) { u32 mant, frac, value, rep; sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep); /* set scale */ value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) | (mant << 12) | frac; sh_veu_reg_write(veu, VEU_RFCR, value); /* set clip */ value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) | (rep << 12) | crop_out; sh_veu_reg_write(veu, VEU_RFSR, value); return ALIGN((size_in * crop_out) / size_out, 4); } static void sh_veu_configure(struct sh_veu_dev *veu) { u32 src_width, src_stride, src_height; u32 dst_width, dst_stride, dst_height; u32 real_w, real_h; /* reset VEU */ sh_veu_reg_write(veu, VEU_BSRR, 0x100); src_width = veu->vfmt_in.frame.width; src_height = veu->vfmt_in.frame.height; src_stride = ALIGN(veu->vfmt_in.frame.width, 16); dst_width = real_w = veu->vfmt_out.frame.width; dst_height = real_h = veu->vfmt_out.frame.height; /* Datasheet is unclear - whether it's always number of bytes or not */ dst_stride = veu->vfmt_out.bytesperline; /* * So far real_w == dst_width && real_h == dst_height, but it wasn't * necessarily the case in the original vidix driver, so, it may change * here in the future too. 
*/ src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width); src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height); sh_veu_reg_write(veu, VEU_SWR, src_stride); sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16)); sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */ sh_veu_reg_write(veu, VEU_EDWR, dst_stride); sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */ sh_veu_reg_write(veu, VEU_SWPR, 0x67); sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4); if (veu->is_2h) { sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5); sh_veu_reg_write(veu, VEU_MCR01, 0x0950); sh_veu_reg_write(veu, VEU_MCR02, 0x0000); sh_veu_reg_write(veu, VEU_MCR10, 0x397f); sh_veu_reg_write(veu, VEU_MCR11, 0x0950); sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd); sh_veu_reg_write(veu, VEU_MCR20, 0x0000); sh_veu_reg_write(veu, VEU_MCR21, 0x0950); sh_veu_reg_write(veu, VEU_MCR22, 0x1023); sh_veu_reg_write(veu, VEU_COFFR, 0x00800010); } } static int sh_veu_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct sh_veu_file *veu_file = priv; if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type)) return -EBUSY; if (veu_file->cfg_needed) { struct sh_veu_dev *veu = veu_file->veu_dev; veu_file->cfg_needed = false; sh_veu_configure(veu_file->veu_dev); veu->xaction = 0; veu->aborting = false; } return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type); } static int sh_veu_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct sh_veu_file *veu_file = priv; if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type)) return -EBUSY; return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type); } static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = { .vidioc_querycap = sh_veu_querycap, .vidioc_enum_fmt_vid_cap = sh_veu_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = sh_veu_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = sh_veu_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = sh_veu_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = 
sh_veu_enum_fmt_vid_out, .vidioc_g_fmt_vid_out = sh_veu_g_fmt_vid_out, .vidioc_try_fmt_vid_out = sh_veu_try_fmt_vid_out, .vidioc_s_fmt_vid_out = sh_veu_s_fmt_vid_out, .vidioc_reqbufs = sh_veu_reqbufs, .vidioc_querybuf = sh_veu_querybuf, .vidioc_qbuf = sh_veu_qbuf, .vidioc_dqbuf = sh_veu_dqbuf, .vidioc_streamon = sh_veu_streamon, .vidioc_streamoff = sh_veu_streamoff, }; /* ========== Queue operations ========== */ static int sh_veu_queue_setup(struct vb2_queue *vq, const struct v4l2_format *f, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct sh_veu_dev *veu = vb2_get_drv_priv(vq); struct sh_veu_vfmt *vfmt; unsigned int size, count = *nbuffers; if (f) { const struct v4l2_pix_format *pix = &f->fmt.pix; const struct sh_veu_format *fmt = sh_veu_find_fmt(f); struct v4l2_format ftmp = *f; if (fmt->fourcc != pix->pixelformat) return -EINVAL; sh_veu_try_fmt(&ftmp, fmt); if (ftmp.fmt.pix.width != pix->width || ftmp.fmt.pix.height != pix->height) return -EINVAL; size = pix->bytesperline ? 
pix->bytesperline * pix->height * fmt->depth / fmt->ydepth : pix->width * pix->height * fmt->depth / fmt->ydepth; } else { vfmt = sh_veu_get_vfmt(veu, vq->type); size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth; } if (count < 2) *nbuffers = count = 2; if (size * count > VIDEO_MEM_LIMIT) { count = VIDEO_MEM_LIMIT / size; *nbuffers = count; } *nplanes = 1; sizes[0] = size; alloc_ctxs[0] = veu->alloc_ctx; dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size); return 0; } static int sh_veu_buf_prepare(struct vb2_buffer *vb) { struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue); struct sh_veu_vfmt *vfmt; unsigned int sizeimage; vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type); sizeimage = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth; if (vb2_plane_size(vb, 0) < sizeimage) { dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n", __func__, vb2_plane_size(vb, 0), sizeimage); return -EINVAL; } vb2_set_plane_payload(vb, 0, sizeimage); return 0; } static void sh_veu_buf_queue(struct vb2_buffer *vb) { struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue); dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->v4l2_buf.type); v4l2_m2m_buf_queue(veu->m2m_ctx, vb); } static void sh_veu_wait_prepare(struct vb2_queue *q) { sh_veu_unlock(vb2_get_drv_priv(q)); } static void sh_veu_wait_finish(struct vb2_queue *q) { sh_veu_lock(vb2_get_drv_priv(q)); } static const struct vb2_ops sh_veu_qops = { .queue_setup = sh_veu_queue_setup, .buf_prepare = sh_veu_buf_prepare, .buf_queue = sh_veu_buf_queue, .wait_prepare = sh_veu_wait_prepare, .wait_finish = sh_veu_wait_finish, }; static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { int ret; memset(src_vq, 0, sizeof(*src_vq)); src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP | VB2_USERPTR; src_vq->drv_priv = priv; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = 
&sh_veu_qops; src_vq->mem_ops = &vb2_dma_contig_memops; ret = vb2_queue_init(src_vq); if (ret < 0) return ret; memset(dst_vq, 0, sizeof(*dst_vq)); dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP | VB2_USERPTR; dst_vq->drv_priv = priv; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &sh_veu_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; return vb2_queue_init(dst_vq); } /* ========== File operations ========== */ static int sh_veu_open(struct file *file) { struct sh_veu_dev *veu = video_drvdata(file); struct sh_veu_file *veu_file; veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL); if (!veu_file) return -ENOMEM; veu_file->veu_dev = veu; veu_file->cfg_needed = true; file->private_data = veu_file; pm_runtime_get_sync(veu->dev); dev_dbg(veu->dev, "Created instance %p\n", veu_file); return 0; } static int sh_veu_release(struct file *file) { struct sh_veu_dev *veu = video_drvdata(file); struct sh_veu_file *veu_file = file->private_data; dev_dbg(veu->dev, "Releasing instance %p\n", veu_file); if (veu_file == veu->capture) { veu->capture = NULL; vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)); } if (veu_file == veu->output) { veu->output = NULL; vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT)); } if (!veu->output && !veu->capture && veu->m2m_ctx) { v4l2_m2m_ctx_release(veu->m2m_ctx); veu->m2m_ctx = NULL; } pm_runtime_put(veu->dev); kfree(veu_file); return 0; } static unsigned int sh_veu_poll(struct file *file, struct poll_table_struct *wait) { struct sh_veu_file *veu_file = file->private_data; return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait); } static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma) { struct sh_veu_file *veu_file = file->private_data; return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma); } static const struct v4l2_file_operations sh_veu_fops = { .owner = THIS_MODULE, .open = sh_veu_open, .release = sh_veu_release, .poll 
= sh_veu_poll, .unlocked_ioctl = video_ioctl2, .mmap = sh_veu_mmap, }; static const struct video_device sh_veu_videodev = { .name = "sh-veu", .fops = &sh_veu_fops, .ioctl_ops = &sh_veu_ioctl_ops, .minor = -1, .release = video_device_release_empty, .vfl_dir = VFL_DIR_M2M, }; static const struct v4l2_m2m_ops sh_veu_m2m_ops = { .device_run = sh_veu_device_run, .job_abort = sh_veu_job_abort, }; static irqreturn_t sh_veu_bh(int irq, void *dev_id) { struct sh_veu_dev *veu = dev_id; if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) { v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx); veu->xaction = 0; } else { sh_veu_device_run(veu); } return IRQ_HANDLED; } static irqreturn_t sh_veu_isr(int irq, void *dev_id) { struct sh_veu_dev *veu = dev_id; struct vb2_buffer *dst; struct vb2_buffer *src; u32 status = sh_veu_reg_read(veu, VEU_EVTR); /* bundle read mode not used */ if (!(status & 1)) return IRQ_NONE; /* disable interrupt in VEU */ sh_veu_reg_write(veu, VEU_EIER, 0); /* halt operation */ sh_veu_reg_write(veu, VEU_STR, 0); /* ack int, write 0 to clear bits */ sh_veu_reg_write(veu, VEU_EVTR, status & ~1); /* conversion completed */ dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx); src = v4l2_m2m_src_buf_remove(veu->m2m_ctx); if (!src || !dst) return IRQ_NONE; spin_lock(&veu->lock); v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE); spin_unlock(&veu->lock); veu->xaction++; return IRQ_WAKE_THREAD; } static int sh_veu_probe(struct platform_device *pdev) { struct sh_veu_dev *veu; struct resource *reg_res; struct video_device *vdev; int irq, ret; reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!reg_res || irq <= 0) { dev_err(&pdev->dev, "Insufficient VEU platform information.\n"); return -ENODEV; } veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL); if (!veu) return -ENOMEM; veu->is_2h = resource_size(reg_res) == 0x22c; veu->base = devm_ioremap_resource(&pdev->dev, reg_res); if 
(IS_ERR(veu->base)) return PTR_ERR(veu->base); ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh, 0, "veu", veu); if (ret < 0) return ret; ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev); if (ret < 0) { dev_err(&pdev->dev, "Error registering v4l2 device\n"); return ret; } vdev = &veu->vdev; veu->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(veu->alloc_ctx)) { ret = PTR_ERR(veu->alloc_ctx); goto einitctx; } *vdev = sh_veu_videodev; spin_lock_init(&veu->lock); mutex_init(&veu->fop_lock); vdev->lock = &veu->fop_lock; video_set_drvdata(vdev, veu); veu->dev = &pdev->dev; veu->vfmt_out = DEFAULT_OUT_VFMT; veu->vfmt_in = DEFAULT_IN_VFMT; veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops); if (IS_ERR(veu->m2m_dev)) { ret = PTR_ERR(veu->m2m_dev); v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret); goto em2minit; } pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); pm_runtime_suspend(&pdev->dev); if (ret < 0) goto evidreg; return ret; evidreg: pm_runtime_disable(&pdev->dev); v4l2_m2m_release(veu->m2m_dev); em2minit: vb2_dma_contig_cleanup_ctx(veu->alloc_ctx); einitctx: v4l2_device_unregister(&veu->v4l2_dev); return ret; } static int sh_veu_remove(struct platform_device *pdev) { struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); struct sh_veu_dev *veu = container_of(v4l2_dev, struct sh_veu_dev, v4l2_dev); video_unregister_device(&veu->vdev); pm_runtime_disable(&pdev->dev); v4l2_m2m_release(veu->m2m_dev); vb2_dma_contig_cleanup_ctx(veu->alloc_ctx); v4l2_device_unregister(&veu->v4l2_dev); return 0; } static struct platform_driver __refdata sh_veu_pdrv = { .remove = sh_veu_remove, .driver = { .name = "sh_veu", .owner = THIS_MODULE, }, }; module_platform_driver_probe(sh_veu_pdrv, sh_veu_probe); MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver"); MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>"); MODULE_LICENSE("GPL v2");
gpl-2.0
lexmar93/Siyah-i777
drivers/net/wireless/libertas/tx.c
2323
5441
/* * This file contains the handling of TX in wlan driver. */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/sched.h> #include <net/cfg80211.h> #include "host.h" #include "radiotap.h" #include "decl.h" #include "defs.h" #include "dev.h" /** * convert_radiotap_rate_to_mv - converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE * units (500 Kb/s) into Marvell WLAN format (see Table 8 in Section 3.2.1) * * @rate: Input rate * returns: Output Rate (0 if invalid) */ static u32 convert_radiotap_rate_to_mv(u8 rate) { switch (rate) { case 2: /* 1 Mbps */ return 0 | (1 << 4); case 4: /* 2 Mbps */ return 1 | (1 << 4); case 11: /* 5.5 Mbps */ return 2 | (1 << 4); case 22: /* 11 Mbps */ return 3 | (1 << 4); case 12: /* 6 Mbps */ return 4 | (1 << 4); case 18: /* 9 Mbps */ return 5 | (1 << 4); case 24: /* 12 Mbps */ return 6 | (1 << 4); case 36: /* 18 Mbps */ return 7 | (1 << 4); case 48: /* 24 Mbps */ return 8 | (1 << 4); case 72: /* 36 Mbps */ return 9 | (1 << 4); case 96: /* 48 Mbps */ return 10 | (1 << 4); case 108: /* 54 Mbps */ return 11 | (1 << 4); } return 0; } /** * lbs_hard_start_xmit - checks the conditions and sends packet to IF * layer if everything is ok * * @skb: A pointer to skb which includes TX packet * @dev: A pointer to the &struct net_device * returns: 0 or -1 */ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; struct lbs_private *priv = dev->ml_priv; struct txpd *txpd; char *p802x_hdr; uint16_t pkt_len; netdev_tx_t ret = NETDEV_TX_OK; lbs_deb_enter(LBS_DEB_TX); /* We need to protect against the queues being restarted before we get round to stopping them */ spin_lock_irqsave(&priv->driver_lock, flags); if (priv->surpriseremoved) goto free; if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) { lbs_deb_tx("tx err: skb length %d 0 or > %zd\n", skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE); /* We'll never manage to send this one; drop it and return 'OK' */ 
dev->stats.tx_dropped++; dev->stats.tx_errors++; goto free; } netif_stop_queue(priv->dev); if (priv->mesh_dev) netif_stop_queue(priv->mesh_dev); if (priv->tx_pending_len) { /* This can happen if packets come in on the mesh and eth device simultaneously -- there's no mutual exclusion on hard_start_xmit() calls between devices. */ lbs_deb_tx("Packet on %s while busy\n", dev->name); ret = NETDEV_TX_BUSY; goto unlock; } priv->tx_pending_len = -1; spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100)); txpd = (void *)priv->tx_pending_buf; memset(txpd, 0, sizeof(struct txpd)); p802x_hdr = skb->data; pkt_len = skb->len; if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data; /* set txpd fields from the radiotap header */ txpd->tx_control = cpu_to_le32(convert_radiotap_rate_to_mv(rtap_hdr->rate)); /* skip the radiotap header */ p802x_hdr += sizeof(*rtap_hdr); pkt_len -= sizeof(*rtap_hdr); /* copy destination address from 802.11 header */ memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN); } else { /* copy destination address from 802.3 header */ memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN); } txpd->tx_packet_length = cpu_to_le16(pkt_len); txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); lbs_mesh_set_txpd(priv, dev, txpd); lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) &txpd, sizeof(struct txpd)); lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr, le16_to_cpu(txpd->tx_packet_length)); memcpy(&txpd[1], p802x_hdr, le16_to_cpu(txpd->tx_packet_length)); spin_lock_irqsave(&priv->driver_lock, flags); priv->tx_pending_len = pkt_len + sizeof(struct txpd); lbs_deb_tx("%s lined up packet\n", __func__); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { /* Keep the skb to echo it back once Tx feedback is received from FW */ skb_orphan(skb); /* Keep the skb around for when we get 
feedback */ priv->currenttxskb = skb; } else { free: dev_kfree_skb_any(skb); } unlock: spin_unlock_irqrestore(&priv->driver_lock, flags); wake_up(&priv->waitq); lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret); return ret; } /** * lbs_send_tx_feedback - sends to the host the last transmitted packet, * filling the radiotap headers with transmission information. * * @priv: A pointer to &struct lbs_private structure * @try_count: A 32-bit value containing transmission retry status. * * returns: void */ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count) { struct tx_radiotap_hdr *radiotap_hdr; if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR || priv->currenttxskb == NULL) return; radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data; radiotap_hdr->data_retries = try_count ? (1 + priv->txretrycount - try_count) : 0; priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb, priv->dev); netif_rx(priv->currenttxskb); priv->currenttxskb = NULL; if (priv->connect_status == LBS_CONNECTED) netif_wake_queue(priv->dev); if (priv->mesh_dev && lbs_mesh_connected(priv)) netif_wake_queue(priv->mesh_dev); } EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
gpl-2.0
mkasick/android_kernel_samsung_d2vzw
drivers/net/wireless/libertas/tx.c
2323
5441
/* * This file contains the handling of TX in wlan driver. */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/sched.h> #include <net/cfg80211.h> #include "host.h" #include "radiotap.h" #include "decl.h" #include "defs.h" #include "dev.h" /** * convert_radiotap_rate_to_mv - converts Tx/Rx rates from IEEE80211_RADIOTAP_RATE * units (500 Kb/s) into Marvell WLAN format (see Table 8 in Section 3.2.1) * * @rate: Input rate * returns: Output Rate (0 if invalid) */ static u32 convert_radiotap_rate_to_mv(u8 rate) { switch (rate) { case 2: /* 1 Mbps */ return 0 | (1 << 4); case 4: /* 2 Mbps */ return 1 | (1 << 4); case 11: /* 5.5 Mbps */ return 2 | (1 << 4); case 22: /* 11 Mbps */ return 3 | (1 << 4); case 12: /* 6 Mbps */ return 4 | (1 << 4); case 18: /* 9 Mbps */ return 5 | (1 << 4); case 24: /* 12 Mbps */ return 6 | (1 << 4); case 36: /* 18 Mbps */ return 7 | (1 << 4); case 48: /* 24 Mbps */ return 8 | (1 << 4); case 72: /* 36 Mbps */ return 9 | (1 << 4); case 96: /* 48 Mbps */ return 10 | (1 << 4); case 108: /* 54 Mbps */ return 11 | (1 << 4); } return 0; } /** * lbs_hard_start_xmit - checks the conditions and sends packet to IF * layer if everything is ok * * @skb: A pointer to skb which includes TX packet * @dev: A pointer to the &struct net_device * returns: 0 or -1 */ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long flags; struct lbs_private *priv = dev->ml_priv; struct txpd *txpd; char *p802x_hdr; uint16_t pkt_len; netdev_tx_t ret = NETDEV_TX_OK; lbs_deb_enter(LBS_DEB_TX); /* We need to protect against the queues being restarted before we get round to stopping them */ spin_lock_irqsave(&priv->driver_lock, flags); if (priv->surpriseremoved) goto free; if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) { lbs_deb_tx("tx err: skb length %d 0 or > %zd\n", skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE); /* We'll never manage to send this one; drop it and return 'OK' */ 
dev->stats.tx_dropped++; dev->stats.tx_errors++; goto free; } netif_stop_queue(priv->dev); if (priv->mesh_dev) netif_stop_queue(priv->mesh_dev); if (priv->tx_pending_len) { /* This can happen if packets come in on the mesh and eth device simultaneously -- there's no mutual exclusion on hard_start_xmit() calls between devices. */ lbs_deb_tx("Packet on %s while busy\n", dev->name); ret = NETDEV_TX_BUSY; goto unlock; } priv->tx_pending_len = -1; spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100)); txpd = (void *)priv->tx_pending_buf; memset(txpd, 0, sizeof(struct txpd)); p802x_hdr = skb->data; pkt_len = skb->len; if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { struct tx_radiotap_hdr *rtap_hdr = (void *)skb->data; /* set txpd fields from the radiotap header */ txpd->tx_control = cpu_to_le32(convert_radiotap_rate_to_mv(rtap_hdr->rate)); /* skip the radiotap header */ p802x_hdr += sizeof(*rtap_hdr); pkt_len -= sizeof(*rtap_hdr); /* copy destination address from 802.11 header */ memcpy(txpd->tx_dest_addr_high, p802x_hdr + 4, ETH_ALEN); } else { /* copy destination address from 802.3 header */ memcpy(txpd->tx_dest_addr_high, p802x_hdr, ETH_ALEN); } txpd->tx_packet_length = cpu_to_le16(pkt_len); txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); lbs_mesh_set_txpd(priv, dev, txpd); lbs_deb_hex(LBS_DEB_TX, "txpd", (u8 *) &txpd, sizeof(struct txpd)); lbs_deb_hex(LBS_DEB_TX, "Tx Data", (u8 *) p802x_hdr, le16_to_cpu(txpd->tx_packet_length)); memcpy(&txpd[1], p802x_hdr, le16_to_cpu(txpd->tx_packet_length)); spin_lock_irqsave(&priv->driver_lock, flags); priv->tx_pending_len = pkt_len + sizeof(struct txpd); lbs_deb_tx("%s lined up packet\n", __func__); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; if (priv->wdev->iftype == NL80211_IFTYPE_MONITOR) { /* Keep the skb to echo it back once Tx feedback is received from FW */ skb_orphan(skb); /* Keep the skb around for when we get 
feedback */ priv->currenttxskb = skb; } else { free: dev_kfree_skb_any(skb); } unlock: spin_unlock_irqrestore(&priv->driver_lock, flags); wake_up(&priv->waitq); lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret); return ret; } /** * lbs_send_tx_feedback - sends to the host the last transmitted packet, * filling the radiotap headers with transmission information. * * @priv: A pointer to &struct lbs_private structure * @try_count: A 32-bit value containing transmission retry status. * * returns: void */ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count) { struct tx_radiotap_hdr *radiotap_hdr; if (priv->wdev->iftype != NL80211_IFTYPE_MONITOR || priv->currenttxskb == NULL) return; radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data; radiotap_hdr->data_retries = try_count ? (1 + priv->txretrycount - try_count) : 0; priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb, priv->dev); netif_rx(priv->currenttxskb); priv->currenttxskb = NULL; if (priv->connect_status == LBS_CONNECTED) netif_wake_queue(priv->dev); if (priv->mesh_dev && lbs_mesh_connected(priv)) netif_wake_queue(priv->mesh_dev); } EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
gpl-2.0
duki994/SM-G850_Kernel_LP
drivers/mtd/maps/latch-addr-flash.c
2323
5523
/* * Interface for NOR flash driver whose high address lines are latched * * Copyright © 2000 Nicolas Pitre <nico@cam.org> * Copyright © 2005-2008 Analog Devices Inc. * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com> * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/platform_device.h> #include <linux/mtd/latch-addr-flash.h> #include <linux/slab.h> #define DRIVER_NAME "latch-addr-flash" struct latch_addr_flash_info { struct mtd_info *mtd; struct map_info map; struct resource *res; void (*set_window)(unsigned long offset, void *data); void *data; /* cache; could be found out of res */ unsigned long win_mask; spinlock_t lock; }; static map_word lf_read(struct map_info *map, unsigned long ofs) { struct latch_addr_flash_info *info; map_word datum; info = (struct latch_addr_flash_info *)map->map_priv_1; spin_lock(&info->lock); info->set_window(ofs, info->data); datum = inline_map_read(map, info->win_mask & ofs); spin_unlock(&info->lock); return datum; } static void lf_write(struct map_info *map, map_word datum, unsigned long ofs) { struct latch_addr_flash_info *info; info = (struct latch_addr_flash_info *)map->map_priv_1; spin_lock(&info->lock); info->set_window(ofs, info->data); inline_map_write(map, datum, info->win_mask & ofs); spin_unlock(&info->lock); } static void lf_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { struct latch_addr_flash_info *info = (struct latch_addr_flash_info *) map->map_priv_1; unsigned n; while (len > 0) { n = info->win_mask + 1 - (from & info->win_mask); if (n > len) n = len; spin_lock(&info->lock); info->set_window(from, info->data); memcpy_fromio(to, map->virt + (from & 
info->win_mask), n); spin_unlock(&info->lock); to += n; from += n; len -= n; } } static char *rom_probe_types[] = { "cfi_probe", NULL }; static int latch_addr_flash_remove(struct platform_device *dev) { struct latch_addr_flash_info *info; struct latch_addr_flash_data *latch_addr_data; info = platform_get_drvdata(dev); if (info == NULL) return 0; platform_set_drvdata(dev, NULL); latch_addr_data = dev->dev.platform_data; if (info->mtd != NULL) { mtd_device_unregister(info->mtd); map_destroy(info->mtd); } if (info->map.virt != NULL) iounmap(info->map.virt); if (info->res != NULL) release_mem_region(info->res->start, resource_size(info->res)); kfree(info); if (latch_addr_data->done) latch_addr_data->done(latch_addr_data->data); return 0; } static int latch_addr_flash_probe(struct platform_device *dev) { struct latch_addr_flash_data *latch_addr_data; struct latch_addr_flash_info *info; resource_size_t win_base = dev->resource->start; resource_size_t win_size = resource_size(dev->resource); char **probe_type; int chipsel; int err; latch_addr_data = dev->dev.platform_data; if (latch_addr_data == NULL) return -ENODEV; pr_notice("latch-addr platform flash device: %#llx byte " "window at %#.8llx\n", (unsigned long long)win_size, (unsigned long long)win_base); chipsel = dev->id; if (latch_addr_data->init) { err = latch_addr_data->init(latch_addr_data->data, chipsel); if (err != 0) return err; } info = kzalloc(sizeof(struct latch_addr_flash_info), GFP_KERNEL); if (info == NULL) { err = -ENOMEM; goto done; } platform_set_drvdata(dev, info); info->res = request_mem_region(win_base, win_size, DRIVER_NAME); if (info->res == NULL) { dev_err(&dev->dev, "Could not reserve memory region\n"); err = -EBUSY; goto free_info; } info->map.name = DRIVER_NAME; info->map.size = latch_addr_data->size; info->map.bankwidth = latch_addr_data->width; info->map.phys = NO_XIP; info->map.virt = ioremap(win_base, win_size); if (!info->map.virt) { err = -ENOMEM; goto free_res; } info->map.map_priv_1 = 
(unsigned long)info; info->map.read = lf_read; info->map.copy_from = lf_copy_from; info->map.write = lf_write; info->set_window = latch_addr_data->set_window; info->data = latch_addr_data->data; info->win_mask = win_size - 1; spin_lock_init(&info->lock); for (probe_type = rom_probe_types; !info->mtd && *probe_type; probe_type++) info->mtd = do_map_probe(*probe_type, &info->map); if (info->mtd == NULL) { dev_err(&dev->dev, "map_probe failed\n"); err = -ENODEV; goto iounmap; } info->mtd->owner = THIS_MODULE; mtd_device_parse_register(info->mtd, NULL, NULL, latch_addr_data->parts, latch_addr_data->nr_parts); return 0; iounmap: iounmap(info->map.virt); free_res: release_mem_region(info->res->start, resource_size(info->res)); free_info: kfree(info); done: if (latch_addr_data->done) latch_addr_data->done(latch_addr_data->data); return err; } static struct platform_driver latch_addr_flash_driver = { .probe = latch_addr_flash_probe, .remove = latch_addr_flash_remove, .driver = { .name = DRIVER_NAME, }, }; module_platform_driver(latch_addr_flash_driver); MODULE_AUTHOR("David Griego <dgriego@mvista.com>"); MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper " "address lines being set board specifically"); MODULE_LICENSE("GPL v2");
gpl-2.0
AayushRd7/Xeski
drivers/hwmon/wm831x-hwmon.c
2835
6095
/* * drivers/hwmon/wm831x-hwmon.c - Wolfson Microelectronics WM831x PMIC * hardware monitoring features. * * Copyright (C) 2009 Wolfson Microelectronics plc * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License v2 as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/slab.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/auxadc.h> struct wm831x_hwmon { struct wm831x *wm831x; struct device *classdev; }; static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "wm831x\n"); } static const char * const input_names[] = { [WM831X_AUX_SYSVDD] = "SYSVDD", [WM831X_AUX_USB] = "USB", [WM831X_AUX_BKUP_BATT] = "Backup battery", [WM831X_AUX_BATT] = "Battery", [WM831X_AUX_WALL] = "WALL", [WM831X_AUX_CHIP_TEMP] = "PMIC", [WM831X_AUX_BATT_TEMP] = "Battery", }; static ssize_t show_voltage(struct device *dev, struct device_attribute *attr, char *buf) { struct wm831x_hwmon *hwmon = dev_get_drvdata(dev); int channel = to_sensor_dev_attr(attr)->index; int ret; ret = wm831x_auxadc_read_uv(hwmon->wm831x, channel); if (ret < 0) return ret; return sprintf(buf, "%d\n", DIV_ROUND_CLOSEST(ret, 1000)); } static ssize_t show_chip_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct wm831x_hwmon *hwmon = 
dev_get_drvdata(dev); int channel = to_sensor_dev_attr(attr)->index; int ret; ret = wm831x_auxadc_read(hwmon->wm831x, channel); if (ret < 0) return ret; /* Degrees celsius = (512.18-ret) / 1.0983 */ ret = 512180 - (ret * 1000); ret = DIV_ROUND_CLOSEST(ret * 10000, 10983); return sprintf(buf, "%d\n", ret); } static ssize_t show_label(struct device *dev, struct device_attribute *attr, char *buf) { int channel = to_sensor_dev_attr(attr)->index; return sprintf(buf, "%s\n", input_names[channel]); } #define WM831X_VOLTAGE(id, name) \ static SENSOR_DEVICE_ATTR(in##id##_input, S_IRUGO, show_voltage, \ NULL, name) #define WM831X_NAMED_VOLTAGE(id, name) \ WM831X_VOLTAGE(id, name); \ static SENSOR_DEVICE_ATTR(in##id##_label, S_IRUGO, show_label, \ NULL, name) static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); WM831X_VOLTAGE(0, WM831X_AUX_AUX1); WM831X_VOLTAGE(1, WM831X_AUX_AUX2); WM831X_VOLTAGE(2, WM831X_AUX_AUX3); WM831X_VOLTAGE(3, WM831X_AUX_AUX4); WM831X_NAMED_VOLTAGE(4, WM831X_AUX_SYSVDD); WM831X_NAMED_VOLTAGE(5, WM831X_AUX_USB); WM831X_NAMED_VOLTAGE(6, WM831X_AUX_BATT); WM831X_NAMED_VOLTAGE(7, WM831X_AUX_WALL); WM831X_NAMED_VOLTAGE(8, WM831X_AUX_BKUP_BATT); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_chip_temp, NULL, WM831X_AUX_CHIP_TEMP); static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, WM831X_AUX_CHIP_TEMP); /* * Report as a voltage since conversion depends on external components * and that's what the ABI wants. 
*/ static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_voltage, NULL, WM831X_AUX_BATT_TEMP); static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL, WM831X_AUX_BATT_TEMP); static struct attribute *wm831x_attributes[] = { &dev_attr_name.attr, &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_label.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in5_label.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in6_label.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in7_label.dev_attr.attr, &sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_in8_label.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_label.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp2_label.dev_attr.attr, NULL }; static const struct attribute_group wm831x_attr_group = { .attrs = wm831x_attributes, }; static int wm831x_hwmon_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_hwmon *hwmon; int ret; hwmon = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_hwmon), GFP_KERNEL); if (!hwmon) return -ENOMEM; hwmon->wm831x = wm831x; ret = sysfs_create_group(&pdev->dev.kobj, &wm831x_attr_group); if (ret) return ret; hwmon->classdev = hwmon_device_register(&pdev->dev); if (IS_ERR(hwmon->classdev)) { ret = PTR_ERR(hwmon->classdev); goto err_sysfs; } platform_set_drvdata(pdev, hwmon); return 0; err_sysfs: sysfs_remove_group(&pdev->dev.kobj, &wm831x_attr_group); return ret; } static int wm831x_hwmon_remove(struct platform_device *pdev) { struct wm831x_hwmon *hwmon = platform_get_drvdata(pdev); hwmon_device_unregister(hwmon->classdev); sysfs_remove_group(&pdev->dev.kobj, &wm831x_attr_group); return 0; } static struct 
platform_driver wm831x_hwmon_driver = { .probe = wm831x_hwmon_probe, .remove = wm831x_hwmon_remove, .driver = { .name = "wm831x-hwmon", .owner = THIS_MODULE, }, }; module_platform_driver(wm831x_hwmon_driver); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("WM831x Hardware Monitoring"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm831x-hwmon");
gpl-2.0
IndieBeto/moggy
arch/arm/mach-msm/msm_bus/msm_bus_rpm.c
3347
27131
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/module.h> #include <mach/msm_bus.h> #include <mach/msm_bus_board.h> #include <mach/board.h> #include <mach/rpm.h> #include "msm_bus_core.h" #include "../rpm_resources.h" void msm_bus_rpm_set_mt_mask() { #ifdef CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED struct msm_rpm_iv_pair mt[1]; int mask = MSM_RPMRS_MASK_RPM_CTL_MULTI_TIER; mt[0].id = MSM_RPM_ID_RPM_CTL; mt[0].value = 2; msm_rpmrs_set_bits_noirq(MSM_RPM_CTX_SET_0, mt, 1, &mask); #endif } bool msm_bus_rpm_is_mem_interleaved(void) { int status = 0; struct msm_rpm_iv_pair il[2]; uint16_t id[2]; il[0].value = 0; il[1].value = 0; status = msm_bus_board_rpm_get_il_ids(id); if (status) { MSM_BUS_DBG("Dynamic check not supported, " "default: Interleaved memory\n"); goto inter; } il[0].id = id[0]; il[1].id = id[1]; status = msm_rpm_get_status(il, ARRAY_SIZE(il)); if (status) { MSM_BUS_ERR("Status read for interleaving returned: %d\n" "Using interleaved memory by default\n", status); goto inter; } /* * If the start address of EBI1-CH0 is the same as * the start address of EBI1-CH1, the memory is interleaved. 
* The start addresses are stored in the 16 MSBs of the status * register */ if ((il[0].value & 0xFFFF0000) != (il[1].value & 0xFFFF0000)) { MSM_BUS_DBG("Non-interleaved memory\n"); return false; } inter: MSM_BUS_DBG("Interleaved memory\n"); return true; } #ifndef CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED struct commit_data { uint16_t *bwsum; uint16_t *arb; unsigned long *actarb; }; /* * The following macros are used for various operations on commit data. * Commit data is an array of 32 bit integers. The size of arrays is unique * to the fabric. Commit arrays are allocated at run-time based on the number * of masters, slaves and tiered-slaves registered. */ #define MSM_BUS_GET_BW_INFO(val, type, bw) \ do { \ (type) = MSM_BUS_GET_BW_TYPE(val); \ (bw) = MSM_BUS_GET_BW(val); \ } while (0) #define MSM_BUS_GET_BW_INFO_BYTES (val, type, bw) \ do { \ (type) = MSM_BUS_GET_BW_TYPE(val); \ (bw) = msm_bus_get_bw_bytes(val); \ } while (0) #define ROUNDED_BW_VAL_FROM_BYTES(bw) \ ((((bw) >> 17) + 1) & 0x8000 ? 0x7FFF : (((bw) >> 17) + 1)) #define BW_VAL_FROM_BYTES(bw) \ ((((bw) >> 17) & 0x8000) ? 0x7FFF : ((bw) >> 17)) static uint32_t msm_bus_set_bw_bytes(unsigned long bw) { return ((((bw) & 0x1FFFF) && (((bw) >> 17) == 0)) ? ROUNDED_BW_VAL_FROM_BYTES(bw) : BW_VAL_FROM_BYTES(bw)); } uint64_t msm_bus_get_bw_bytes(unsigned long val) { return ((val) & 0x7FFF) << 17; } uint16_t msm_bus_get_bw(unsigned long val) { return (val)&0x7FFF; } static uint16_t msm_bus_create_bw_tier_pair_bytes(uint8_t type, unsigned long bw) { return ((((type) == MSM_BUS_BW_TIER1 ? 1 : 0) << 15) | (msm_bus_set_bw_bytes(bw))); }; uint16_t msm_bus_create_bw_tier_pair(uint8_t type, unsigned long bw) { return (((type) == MSM_BUS_BW_TIER1 ? 
1 : 0) << 15) | ((bw) & 0x7FFF); } void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size, void *cdata, int nmasters, int nslaves, int ntslaves) { int j, c; struct commit_data *cd = (struct commit_data *)cdata; *curr += scnprintf(buf + *curr, max_size - *curr, "BWSum:\n"); for (c = 0; c < nslaves; c++) *curr += scnprintf(buf + *curr, max_size - *curr, "0x%x\t", cd->bwsum[c]); *curr += scnprintf(buf + *curr, max_size - *curr, "\nArb:"); for (c = 0; c < ntslaves; c++) { *curr += scnprintf(buf + *curr, max_size - *curr, "\nTSlave %d:\n", c); for (j = 0; j < nmasters; j++) *curr += scnprintf(buf + *curr, max_size - *curr, " 0x%x\t", cd->arb[(c * nmasters) + j]); } } /** * allocate_commit_data() - Allocate the data for commit array in the * format specified by RPM * @fabric: Fabric device for which commit data is allocated */ static int msm_bus_rpm_allocate_commit_data(struct msm_bus_fabric_registration *fab_pdata, void **cdata, int ctx) { struct commit_data **cd = (struct commit_data **)cdata; *cd = kzalloc(sizeof(struct commit_data), GFP_KERNEL); if (!*cd) { MSM_BUS_DBG("Couldn't alloc mem for cdata\n"); return -ENOMEM; } (*cd)->bwsum = kzalloc((sizeof(uint16_t) * fab_pdata->nslaves), GFP_KERNEL); if (!(*cd)->bwsum) { MSM_BUS_DBG("Couldn't alloc mem for slaves\n"); kfree(*cd); return -ENOMEM; } (*cd)->arb = kzalloc(((sizeof(uint16_t *)) * (fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1), GFP_KERNEL); if (!(*cd)->arb) { MSM_BUS_DBG("Couldn't alloc memory for" " slaves\n"); kfree((*cd)->bwsum); kfree(*cd); return -ENOMEM; } (*cd)->actarb = kzalloc(((sizeof(unsigned long *)) * (fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1), GFP_KERNEL); if (!(*cd)->actarb) { MSM_BUS_DBG("Couldn't alloc memory for" " slaves\n"); kfree((*cd)->bwsum); kfree((*cd)->arb); kfree(*cd); return -ENOMEM; } return 0; } static void free_commit_data(void *cdata) { struct commit_data *cd = (struct commit_data *)cdata; kfree(cd->bwsum); kfree(cd->arb); 
kfree(cd->actarb); kfree(cd); } /** * allocate_rpm_data() - Allocate the id-value pairs to be * sent to RPM */ static void *msm_bus_rpm_allocate_rpm_data(struct platform_device *pdev, struct msm_bus_fabric_registration *fab_pdata) { struct msm_rpm_iv_pair *rpm_data; uint16_t count = ((fab_pdata->nmasters * fab_pdata->ntieredslaves) + fab_pdata->nslaves + 1)/2; rpm_data = kmalloc((sizeof(struct msm_rpm_iv_pair) * count), GFP_KERNEL); return (void *)rpm_data; } #define BWMASK 0x7FFF #define TIERMASK 0x8000 #define GET_TIER(n) (((n) & TIERMASK) >> 15) static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop, struct msm_bus_inode_info *info, struct msm_bus_fabric_registration *fab_pdata, void *sel_cdata, int *master_tiers, int64_t add_bw) { int index, i, j, tiers, ports; struct commit_data *sel_cd = (struct commit_data *)sel_cdata; add_bw = INTERLEAVED_BW(fab_pdata, add_bw, info->node_info->num_mports); ports = INTERLEAVED_VAL(fab_pdata, info->node_info->num_mports); tiers = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_tiers); for (i = 0; i < tiers; i++) { for (j = 0; j < ports; j++) { uint16_t hop_tier; /* * For interleaved gateway ports and slave ports, * there is one-one mapping between gateway port and * the slave port */ if (info->node_info->gateway && i != j && (hop->node_info->num_sports > 1)) continue; if (!hop->node_info->tier) hop_tier = MSM_BUS_BW_TIER2 - 1; else hop_tier = hop->node_info->tier[i] - 1; index = ((hop_tier * fab_pdata->nmasters) + (info->node_info->masterp[j])); /* If there is tier, calculate arb for commit */ if (hop->node_info->tier) { uint16_t tier; unsigned long tieredbw = sel_cd->actarb[index]; if (GET_TIER(sel_cd->arb[index])) tier = MSM_BUS_BW_TIER1; else if (master_tiers) /* * By default master is only in the * tier specified by default. 
* To change the default tier, client * needs to explicitly request for a * different supported tier */ tier = master_tiers[0]; else tier = MSM_BUS_BW_TIER2; /* * Make sure gateway to slave port bandwidth * is not divided when slave is interleaved */ if (info->node_info->gateway && hop->node_info->num_sports > 1) tieredbw += add_bw; else tieredbw += INTERLEAVED_BW(fab_pdata, add_bw, hop->node_info-> num_sports); /* If bw is 0, update tier to default */ if (!tieredbw) tier = MSM_BUS_BW_TIER2; /* Update Arb for fab,get HW Mport from enum */ sel_cd->arb[index] = msm_bus_create_bw_tier_pair_bytes(tier, tieredbw); sel_cd->actarb[index] = tieredbw; MSM_BUS_DBG("tr:%d mpor:%d tbw:%ld bws: %lld\n", hop_tier, info->node_info->masterp[i], tieredbw, *hop->link_info.sel_bw); } } } /* Update bwsum for slaves on fabric */ ports = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_sports); for (i = 0; i < ports; i++) { sel_cd->bwsum[hop->node_info->slavep[i]] = (uint16_t)msm_bus_create_bw_tier_pair_bytes(0, (uint32_t)msm_bus_div64(hop->node_info->num_sports, *hop->link_info.sel_bw)); MSM_BUS_DBG("slavep:%d, link_bw: %u\n", hop->node_info->slavep[i], (uint32_t) msm_bus_div64(hop->node_info->num_sports, *hop->link_info.sel_bw)); } } #define RPM_SHIFT_VAL 16 #define RPM_SHIFT(n) ((n) << RPM_SHIFT_VAL) static int msm_bus_rpm_compare_cdata( struct msm_bus_fabric_registration *fab_pdata, struct commit_data *cd1, struct commit_data *cd2) { size_t n; int ret; n = sizeof(uint16_t) * fab_pdata->nslaves; ret = memcmp(cd1->bwsum, cd2->bwsum, n); if (ret) { MSM_BUS_DBG("Commit Data bwsum not equal\n"); return ret; } n = sizeof(uint16_t *) * ((fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1); ret = memcmp(cd1->arb, cd2->arb, n); if (ret) { MSM_BUS_DBG("Commit Data arb[%d] not equal\n", n); return ret; } return 0; } static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration *fab_pdata, int ctx, struct msm_rpm_iv_pair *rpm_data, struct commit_data *cd, bool valid) { int i, j, offset = 
0, status = 0, count, index = 0; /* * count is the number of 2-byte words required to commit the * data to rpm. This is calculated by the following formula. * Commit data is split into two arrays: * 1. arb[nmasters * ntieredslaves] * 2. bwsum[nslaves] */ count = ((fab_pdata->nmasters * fab_pdata->ntieredslaves) + (fab_pdata->nslaves) + 1)/2; offset = fab_pdata->offset; /* * Copy bwsum to rpm data * Since bwsum is uint16, the values need to be adjusted to * be copied to value field of rpm-data, which is 32 bits. */ for (i = 0; i < (fab_pdata->nslaves - 1); i += 2) { rpm_data[index].id = offset + index; rpm_data[index].value = RPM_SHIFT(*(cd->bwsum + i + 1)) | *(cd->bwsum + i); index++; } /* Account for odd number of slaves */ if (fab_pdata->nslaves & 1) { rpm_data[index].id = offset + index; rpm_data[index].value = *(cd->arb); rpm_data[index].value = RPM_SHIFT(rpm_data[index].value) | *(cd->bwsum + i); index++; i = 1; } else i = 0; /* Copy arb values to rpm data */ for (; i < (fab_pdata->ntieredslaves * fab_pdata->nmasters); i += 2) { rpm_data[index].id = offset + index; rpm_data[index].value = RPM_SHIFT(*(cd->arb + i + 1)) | *(cd->arb + i); index++; } MSM_BUS_DBG("rpm data for fab: %d\n", fab_pdata->id); for (i = 0; i < count; i++) MSM_BUS_DBG("%d %x\n", rpm_data[i].id, rpm_data[i].value); MSM_BUS_DBG("Commit Data: Fab: %d BWSum:\n", fab_pdata->id); for (i = 0; i < fab_pdata->nslaves; i++) MSM_BUS_DBG("fab_slaves:0x%x\n", cd->bwsum[i]); MSM_BUS_DBG("Commit Data: Fab: %d Arb:\n", fab_pdata->id); for (i = 0; i < fab_pdata->ntieredslaves; i++) { MSM_BUS_DBG("tiered-slave: %d\n", i); for (j = 0; j < fab_pdata->nmasters; j++) MSM_BUS_DBG(" 0x%x\n", cd->arb[(i * fab_pdata->nmasters) + j]); } MSM_BUS_DBG("calling msm_rpm_set: %d\n", status); msm_bus_dbg_commit_data(fab_pdata->name, cd, fab_pdata-> nmasters, fab_pdata->nslaves, fab_pdata->ntieredslaves, MSM_BUS_DBG_OP); if (fab_pdata->rpm_enabled) { if (valid) { if (ctx == ACTIVE_CTX) { status = 
msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, count); MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); } else if (ctx == DUAL_CTX) { status = msm_rpm_set(MSM_RPM_CTX_SET_SLEEP, rpm_data, count); MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); } } else { if (ctx == ACTIVE_CTX) { status = msm_rpm_clear(MSM_RPM_CTX_SET_0, rpm_data, count); MSM_BUS_DBG("msm_rpm_clear returned: %d\n", status); } else if (ctx == DUAL_CTX) { status = msm_rpm_clear(MSM_RPM_CTX_SET_SLEEP, rpm_data, count); MSM_BUS_DBG("msm_rpm_clear returned: %d\n", status); } } } return status; } #else #define NUM_TIERS 2 #define RPM_SHIFT24(n) ((n) << 24) #define RPM_SHIFT16(n) ((n) << 16) #define RPM_SHIFT8(n) ((n) << 8) struct commit_data { uint16_t *bwsum; uint8_t *arb[NUM_TIERS]; unsigned long *actarb[NUM_TIERS]; }; #define MODE_BIT(val) ((val) & 0x80) #define MODE0_IMM(val) ((val) & 0xF) #define MODE0_SHIFT(val) (((val) & 0x70) >> 4) #define MODE1_STEP 48 /* 48 MB */ #define MODE1_OFFSET 512 /* 512 MB */ #define MODE1_IMM(val) ((val) & 0x7F) #define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x)) static uint8_t msm_bus_set_bw_bytes(unsigned long val) { unsigned int shift; unsigned int intVal; unsigned char result; /* Convert to MB */ intVal = (unsigned int)((val + ((1 << 20) - 1)) >> 20); /** * Divide by 2^20 and round up * A value graeter than 0x1E0 will round up to 512 and overflow * Mode 0 so it should be made Mode 1 */ if (0x1E0 > intVal) { /** * MODE 0 * Compute the shift value * Shift value is 32 - the number of leading zeroes - * 4 to save the most significant 4 bits of the value */ shift = 32 - 4 - min((uint8_t)28, (uint8_t)__CLZ(intVal)); /* Add min value - 1 to force a round up when shifting right */ intVal += (1 << shift) - 1; /* Recompute the shift value in case there was an overflow */ shift = 32 - 4 - min((uint8_t)28, (uint8_t)__CLZ(intVal)); /* Clear the mode bit (msb) and fill in the fields */ result = ((0x70 & (shift << 4)) | (0x0F & (intVal >> shift))); } else { /* MODE 1 */ result 
= (unsigned char)(0x80 | ((intVal - MODE1_OFFSET + MODE1_STEP - 1) / MODE1_STEP)); } return result; } uint64_t msm_bus_get_bw(unsigned long val) { return MODE_BIT(val) ? /* Mode 1 */ (MODE1_IMM(val) * MODE1_STEP + MODE1_OFFSET) : /* Mode 0 */ (MODE0_IMM(val) << MODE0_SHIFT(val)); } uint64_t msm_bus_get_bw_bytes(unsigned long val) { return msm_bus_get_bw(val) << 20; } static uint8_t msm_bus_create_bw_tier_pair_bytes(uint8_t type, unsigned long bw) { return msm_bus_set_bw_bytes(bw); }; uint8_t msm_bus_create_bw_tier_pair(uint8_t type, unsigned long bw) { return msm_bus_create_bw_tier_pair_bytes(type, bw); }; static int msm_bus_rpm_allocate_commit_data(struct msm_bus_fabric_registration *fab_pdata, void **cdata, int ctx) { struct commit_data **cd = (struct commit_data **)cdata; int i; *cd = kzalloc(sizeof(struct commit_data), GFP_KERNEL); if (!*cd) { MSM_BUS_DBG("Couldn't alloc mem for cdata\n"); goto cdata_err; } (*cd)->bwsum = kzalloc((sizeof(uint16_t) * fab_pdata->nslaves), GFP_KERNEL); if (!(*cd)->bwsum) { MSM_BUS_DBG("Couldn't alloc mem for slaves\n"); goto bwsum_err; } for (i = 0; i < NUM_TIERS; i++) { (*cd)->arb[i] = kzalloc(((sizeof(uint8_t *)) * (fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1), GFP_KERNEL); if (!(*cd)->arb[i]) { MSM_BUS_DBG("Couldn't alloc memory for" " slaves\n"); goto arb_err; } (*cd)->actarb[i] = kzalloc(((sizeof(unsigned long *)) * (fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1), GFP_KERNEL); if (!(*cd)->actarb[i]) { MSM_BUS_DBG("Couldn't alloc memory for" " slaves\n"); kfree((*cd)->arb[i]); goto arb_err; } } return 0; arb_err: for (i = i - 1; i >= 0; i--) { kfree((*cd)->arb[i]); kfree((*cd)->actarb[i]); } bwsum_err: kfree((*cd)->bwsum); cdata_err: kfree(*cd); return -ENOMEM; } static void free_commit_data(void *cdata) { int i; struct commit_data *cd = (struct commit_data *)cdata; kfree(cd->bwsum); for (i = 0; i < NUM_TIERS; i++) { kfree(cd->arb[i]); kfree(cd->actarb[i]); } kfree(cd); } static void 
*msm_bus_rpm_allocate_rpm_data(struct platform_device *pdev, struct msm_bus_fabric_registration *fab_pdata) { struct msm_rpm_iv_pair *rpm_data; uint16_t count = (((fab_pdata->nmasters * fab_pdata->ntieredslaves * NUM_TIERS)/2) + fab_pdata->nslaves + 1)/2; rpm_data = kmalloc((sizeof(struct msm_rpm_iv_pair) * count), GFP_KERNEL); return (void *)rpm_data; } static int msm_bus_rpm_compare_cdata( struct msm_bus_fabric_registration *fab_pdata, struct commit_data *cd1, struct commit_data *cd2) { size_t n; int i, ret; n = sizeof(uint16_t) * fab_pdata->nslaves; ret = memcmp(cd1->bwsum, cd2->bwsum, n); if (ret) { MSM_BUS_DBG("Commit Data bwsum not equal\n"); return ret; } n = sizeof(uint8_t *) * ((fab_pdata->ntieredslaves * fab_pdata->nmasters) + 1); for (i = 0; i < NUM_TIERS; i++) { ret = memcmp(cd1->arb[i], cd2->arb[i], n); if (ret) { MSM_BUS_DBG("Commit Data arb[%d] not equal\n", i); return ret; } } return 0; } static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration *fab_pdata, int ctx, struct msm_rpm_iv_pair *rpm_data, struct commit_data *cd, bool valid) { int i, j, k, offset = 0, status = 0, count, index = 0; /* * count is the number of 2-byte words required to commit the * data to rpm. This is calculated by the following formula. * Commit data is split into two arrays: * 1. arb[nmasters * ntieredslaves][num_tiers] * 2. bwsum[nslaves] */ count = (((fab_pdata->nmasters * fab_pdata->ntieredslaves * NUM_TIERS) /2) + fab_pdata->nslaves + 1)/2; offset = fab_pdata->offset; /* * Copy bwsum to rpm data * Since bwsum is uint16, the values need to be adjusted to * be copied to value field of rpm-data, which is 32 bits. 
*/ for (i = 0; i < (fab_pdata->nslaves - 1); i += 2) { rpm_data[index].id = offset + index; rpm_data[index].value = RPM_SHIFT16(*(cd->bwsum + i + 1)) | *(cd->bwsum + i); index++; } /* Account for odd number of slaves */ if (fab_pdata->nslaves & 1) { rpm_data[index].id = offset + index; rpm_data[index].value = RPM_SHIFT8(*cd->arb[1]) | *(cd->arb[0]); rpm_data[index].value = RPM_SHIFT16(rpm_data[index].value) | *(cd->bwsum + i); index++; i = 1; } else i = 0; /* Copy arb values to rpm data */ for (; i < (fab_pdata->ntieredslaves * fab_pdata->nmasters); i += 2) { uint16_t tv1, tv0; rpm_data[index].id = offset + index; tv0 = RPM_SHIFT8(*(cd->arb[1] + i)) | (*(cd->arb[0] + i)); tv1 = RPM_SHIFT8(*(cd->arb[1] + i + 1)) | (*(cd->arb[0] + i + 1)); rpm_data[index].value = RPM_SHIFT16(tv1) | tv0; index++; } MSM_BUS_DBG("rpm data for fab: %d\n", fab_pdata->id); for (i = 0; i < count; i++) MSM_BUS_DBG("%d %x\n", rpm_data[i].id, rpm_data[i].value); MSM_BUS_DBG("Commit Data: Fab: %d BWSum:\n", fab_pdata->id); for (i = 0; i < fab_pdata->nslaves; i++) MSM_BUS_DBG("fab_slaves:0x%x\n", cd->bwsum[i]); MSM_BUS_DBG("Commit Data: Fab: %d Arb:\n", fab_pdata->id); for (k = 0; k < NUM_TIERS; k++) { MSM_BUS_DBG("Tier: %d\n", k); for (i = 0; i < fab_pdata->ntieredslaves; i++) { MSM_BUS_DBG("tiered-slave: %d\n", i); for (j = 0; j < fab_pdata->nmasters; j++) MSM_BUS_DBG(" 0x%x\n", cd->arb[k][(i * fab_pdata->nmasters) + j]); } } MSM_BUS_DBG("calling msm_rpm_set: %d\n", status); msm_bus_dbg_commit_data(fab_pdata->name, (void *)cd, fab_pdata-> nmasters, fab_pdata->nslaves, fab_pdata->ntieredslaves, MSM_BUS_DBG_OP); if (fab_pdata->rpm_enabled) { if (valid) { if (ctx == ACTIVE_CTX) { status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, count); MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); } else if (ctx == DUAL_CTX) { status = msm_rpm_set(MSM_RPM_CTX_SET_SLEEP, rpm_data, count); MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); } } else { if (ctx == ACTIVE_CTX) { status = 
msm_rpm_clear(MSM_RPM_CTX_SET_0, rpm_data, count); MSM_BUS_DBG("msm_rpm_clear returned: %d\n", status); } else if (ctx == DUAL_CTX) { status = msm_rpm_clear(MSM_RPM_CTX_SET_SLEEP, rpm_data, count); MSM_BUS_DBG("msm_rpm_clear returned: %d\n", status); } } } return status; } #define FORMAT_BW(x) \ ((x < 0) ? \ -(msm_bus_get_bw_bytes(msm_bus_create_bw_tier_pair_bytes(0, -(x)))) : \ (msm_bus_get_bw_bytes(msm_bus_create_bw_tier_pair_bytes(0, x)))) static uint16_t msm_bus_pack_bwsum_bytes(unsigned long bw) { return (bw + ((1 << 20) - 1)) >> 20; }; static void msm_bus_rpm_update_bw(struct msm_bus_inode_info *hop, struct msm_bus_inode_info *info, struct msm_bus_fabric_registration *fab_pdata, void *sel_cdata, int *master_tiers, int64_t add_bw) { int index, i, j, tiers, ports; struct commit_data *sel_cd = (struct commit_data *)sel_cdata; add_bw = INTERLEAVED_BW(fab_pdata, add_bw, info->node_info->num_mports); ports = INTERLEAVED_VAL(fab_pdata, info->node_info->num_mports); tiers = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_tiers); for (i = 0; i < tiers; i++) { for (j = 0; j < ports; j++) { uint16_t hop_tier; /* * For interleaved gateway ports and slave ports, * there is one-one mapping between gateway port and * the slave port */ if (info->node_info->gateway && i != j && hop->node_info->num_sports > 1) continue; if (!hop->node_info->tier) hop_tier = MSM_BUS_BW_TIER2 - 1; else hop_tier = hop->node_info->tier[i] - 1; index = ((hop_tier * fab_pdata->nmasters) + (info->node_info->masterp[j])); /* If there is tier, calculate arb for commit */ if (hop->node_info->tier) { uint16_t tier; unsigned long tieredbw; if (master_tiers) tier = master_tiers[0] - 1; else tier = MSM_BUS_BW_TIER2 - 1; tieredbw = sel_cd->actarb[tier][index]; /* * Make sure gateway to slave port bandwidth * is not divided when slave is interleaved */ if (info->node_info->gateway && hop->node_info->num_sports > 1) tieredbw += add_bw; else tieredbw += INTERLEAVED_BW(fab_pdata, add_bw, hop->node_info-> 
num_sports); /* Update Arb for fab,get HW Mport from enum */ sel_cd->arb[tier][index] = msm_bus_create_bw_tier_pair_bytes(0, tieredbw); sel_cd->actarb[tier][index] = tieredbw; MSM_BUS_DBG("tr:%d mpor:%d tbw:%lu bws: %lld\n", hop_tier, info->node_info->masterp[i], tieredbw, *hop->link_info.sel_bw); } } } /* Update bwsum for slaves on fabric */ ports = INTERLEAVED_VAL(fab_pdata, hop->node_info->num_sports); for (i = 0; i < ports; i++) { sel_cd->bwsum[hop->node_info->slavep[i]] = msm_bus_pack_bwsum_bytes((uint32_t) msm_bus_div64(hop->node_info->num_sports, *hop->link_info.sel_bw)); MSM_BUS_DBG("slavep:%d, link_bw: %lld\n", hop->node_info->slavep[i], msm_bus_div64(hop->node_info->num_sports, *hop->link_info.sel_bw)); } } void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size, void *cdata, int nmasters, int nslaves, int ntslaves) { int j, k, c; struct commit_data *cd = (struct commit_data *)cdata; *curr += scnprintf(buf + *curr, max_size - *curr, "BWSum:\n"); for (c = 0; c < nslaves; c++) *curr += scnprintf(buf + *curr, max_size - *curr, "0x%x\t", cd->bwsum[c]); *curr += scnprintf(buf + *curr, max_size - *curr, "\nArb:"); for (k = 0; k < NUM_TIERS; k++) { *curr += scnprintf(buf + *curr, max_size - *curr, "\nTier %d:\n", k); for (c = 0; c < ntslaves; c++) { *curr += scnprintf(buf + *curr, max_size - *curr, "TSlave %d:\n", c); for (j = 0; j < nmasters; j++) *curr += scnprintf(buf + *curr, max_size - *curr, " 0x%x\t", cd->arb[k][(c * nmasters) + j]); } } } #endif /** * msm_bus_rpm_commit() - Commit the arbitration data to RPM * @fabric: Fabric for which the data should be committed **/ static int msm_bus_rpm_commit(struct msm_bus_fabric_registration *fab_pdata, void *hw_data, void **cdata) { int ret; bool valid; struct commit_data *dual_cd, *act_cd; struct msm_rpm_iv_pair *rpm_data = (struct msm_rpm_iv_pair *)hw_data; dual_cd = (struct commit_data *)cdata[DUAL_CTX]; act_cd = (struct commit_data *)cdata[ACTIVE_CTX]; /* * If the arb data for active set 
and sleep set is * different, commit both sets. * If the arb data for active set and sleep set is * the same, invalidate the sleep set. */ ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd); if (!ret) /* Invalidate sleep set.*/ valid = false; else valid = true; ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data, dual_cd, valid); if (ret) MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n", fab_pdata->id, DUAL_CTX); valid = true; ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd, valid); if (ret) MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n", fab_pdata->id, ACTIVE_CTX); return ret; } static int msm_bus_rpm_port_halt(uint32_t haltid, uint8_t mport) { int status = 0; struct msm_bus_halt_vector hvector = {0, 0}; struct msm_rpm_iv_pair rpm_data[2]; MSM_BUS_MASTER_HALT(hvector.haltmask, hvector.haltval, mport); rpm_data[0].id = haltid; rpm_data[0].value = hvector.haltval; rpm_data[1].id = haltid + 1; rpm_data[1].value = hvector.haltmask; MSM_BUS_DBG("ctx: %d, id: %d, value: %d\n", MSM_RPM_CTX_SET_0, rpm_data[0].id, rpm_data[0].value); MSM_BUS_DBG("ctx: %d, id: %d, value: %d\n", MSM_RPM_CTX_SET_0, rpm_data[1].id, rpm_data[1].value); status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, 2); if (status) MSM_BUS_DBG("msm_rpm_set returned: %d\n", status); return status; } static int msm_bus_rpm_port_unhalt(uint32_t haltid, uint8_t mport) { int status = 0; struct msm_bus_halt_vector hvector = {0, 0}; struct msm_rpm_iv_pair rpm_data[2]; MSM_BUS_MASTER_UNHALT(hvector.haltmask, hvector.haltval, mport); rpm_data[0].id = haltid; rpm_data[0].value = hvector.haltval; rpm_data[1].id = haltid + 1; rpm_data[1].value = hvector.haltmask; MSM_BUS_DBG("unalt: ctx: %d, id: %d, value: %d\n", MSM_RPM_CTX_SET_SLEEP, rpm_data[0].id, rpm_data[0].value); MSM_BUS_DBG("unhalt: ctx: %d, id: %d, value: %d\n", MSM_RPM_CTX_SET_SLEEP, rpm_data[1].id, rpm_data[1].value); status = msm_rpm_set(MSM_RPM_CTX_SET_0, rpm_data, 2); if (status) MSM_BUS_DBG("msm_rpm_set returned: 
%d\n", status); return status; } int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration *fab_pdata, void *hw_data, void **cdata) { return 0; } int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata, struct msm_bus_hw_algorithm *hw_algo) { pdata->il_flag = msm_bus_rpm_is_mem_interleaved(); hw_algo->allocate_commit_data = msm_bus_rpm_allocate_commit_data; hw_algo->allocate_hw_data = msm_bus_rpm_allocate_rpm_data; hw_algo->node_init = NULL; hw_algo->free_commit_data = free_commit_data; hw_algo->update_bw = msm_bus_rpm_update_bw; hw_algo->commit = msm_bus_rpm_commit; hw_algo->port_halt = msm_bus_rpm_port_halt; hw_algo->port_unhalt = msm_bus_rpm_port_unhalt; if (!pdata->ahb) pdata->rpm_enabled = 1; return 0; }
gpl-2.0
greg-pe/platform_kernel_tuna
net/rds/ib_recv.c
3347
30550
/* * Copyright (c) 2006 Oracle. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <rdma/rdma_cm.h> #include "rds.h" #include "ib.h" static struct kmem_cache *rds_ib_incoming_slab; static struct kmem_cache *rds_ib_frag_slab; static atomic_t rds_ib_allocation = ATOMIC_INIT(0); void rds_ib_recv_init_ring(struct rds_ib_connection *ic) { struct rds_ib_recv_work *recv; u32 i; for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) { struct ib_sge *sge; recv->r_ibinc = NULL; recv->r_frag = NULL; recv->r_wr.next = NULL; recv->r_wr.wr_id = i; recv->r_wr.sg_list = recv->r_sge; recv->r_wr.num_sge = RDS_IB_RECV_SGE; sge = &recv->r_sge[0]; sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header)); sge->length = sizeof(struct rds_header); sge->lkey = ic->i_mr->lkey; sge = &recv->r_sge[1]; sge->addr = 0; sge->length = RDS_FRAG_SIZE; sge->lkey = ic->i_mr->lkey; } } /* * The entire 'from' list, including the from element itself, is put on * to the tail of the 'to' list. 
*/ static void list_splice_entire_tail(struct list_head *from, struct list_head *to) { struct list_head *from_last = from->prev; list_splice_tail(from_last, to); list_add_tail(from_last, to); } static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache) { struct list_head *tmp; tmp = xchg(&cache->xfer, NULL); if (tmp) { if (cache->ready) list_splice_entire_tail(tmp, cache->ready); else cache->ready = tmp; } } static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache) { struct rds_ib_cache_head *head; int cpu; cache->percpu = alloc_percpu(struct rds_ib_cache_head); if (!cache->percpu) return -ENOMEM; for_each_possible_cpu(cpu) { head = per_cpu_ptr(cache->percpu, cpu); head->first = NULL; head->count = 0; } cache->xfer = NULL; cache->ready = NULL; return 0; } int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic) { int ret; ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs); if (!ret) { ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags); if (ret) free_percpu(ic->i_cache_incs.percpu); } return ret; } static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache, struct list_head *caller_list) { struct rds_ib_cache_head *head; int cpu; for_each_possible_cpu(cpu) { head = per_cpu_ptr(cache->percpu, cpu); if (head->first) { list_splice_entire_tail(head->first, caller_list); head->first = NULL; } } if (cache->ready) { list_splice_entire_tail(cache->ready, caller_list); cache->ready = NULL; } } void rds_ib_recv_free_caches(struct rds_ib_connection *ic) { struct rds_ib_incoming *inc; struct rds_ib_incoming *inc_tmp; struct rds_page_frag *frag; struct rds_page_frag *frag_tmp; LIST_HEAD(list); rds_ib_cache_xfer_to_ready(&ic->i_cache_incs); rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list); free_percpu(ic->i_cache_incs.percpu); list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) { list_del(&inc->ii_cache_entry); WARN_ON(!list_empty(&inc->ii_frags)); kmem_cache_free(rds_ib_incoming_slab, inc); } 
rds_ib_cache_xfer_to_ready(&ic->i_cache_frags); rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list); free_percpu(ic->i_cache_frags.percpu); list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) { list_del(&frag->f_cache_entry); WARN_ON(!list_empty(&frag->f_item)); kmem_cache_free(rds_ib_frag_slab, frag); } } /* fwd decl */ static void rds_ib_recv_cache_put(struct list_head *new_item, struct rds_ib_refill_cache *cache); static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache); /* Recycle frag and attached recv buffer f_sg */ static void rds_ib_frag_free(struct rds_ib_connection *ic, struct rds_page_frag *frag) { rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg)); rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags); } /* Recycle inc after freeing attached frags */ void rds_ib_inc_free(struct rds_incoming *inc) { struct rds_ib_incoming *ibinc; struct rds_page_frag *frag; struct rds_page_frag *pos; struct rds_ib_connection *ic = inc->i_conn->c_transport_data; ibinc = container_of(inc, struct rds_ib_incoming, ii_inc); /* Free attached frags */ list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) { list_del_init(&frag->f_item); rds_ib_frag_free(ic, frag); } BUG_ON(!list_empty(&ibinc->ii_frags)); rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc); rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs); } static void rds_ib_recv_clear_one(struct rds_ib_connection *ic, struct rds_ib_recv_work *recv) { if (recv->r_ibinc) { rds_inc_put(&recv->r_ibinc->ii_inc); recv->r_ibinc = NULL; } if (recv->r_frag) { ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE); rds_ib_frag_free(ic, recv->r_frag); recv->r_frag = NULL; } } void rds_ib_recv_clear_ring(struct rds_ib_connection *ic) { u32 i; for (i = 0; i < ic->i_recv_ring.w_nr; i++) rds_ib_recv_clear_one(ic, &ic->i_recvs[i]); } static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic, gfp_t slab_mask) { struct 
rds_ib_incoming *ibinc; struct list_head *cache_item; int avail_allocs; cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs); if (cache_item) { ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry); } else { avail_allocs = atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation); if (!avail_allocs) { rds_ib_stats_inc(s_ib_rx_alloc_limit); return NULL; } ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask); if (!ibinc) { atomic_dec(&rds_ib_allocation); return NULL; } } INIT_LIST_HEAD(&ibinc->ii_frags); rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr); return ibinc; } static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic, gfp_t slab_mask, gfp_t page_mask) { struct rds_page_frag *frag; struct list_head *cache_item; int ret; cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags); if (cache_item) { frag = container_of(cache_item, struct rds_page_frag, f_cache_entry); } else { frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask); if (!frag) return NULL; sg_init_table(&frag->f_sg, 1); ret = rds_page_remainder_alloc(&frag->f_sg, RDS_FRAG_SIZE, page_mask); if (ret) { kmem_cache_free(rds_ib_frag_slab, frag); return NULL; } } INIT_LIST_HEAD(&frag->f_item); return frag; } static int rds_ib_recv_refill_one(struct rds_connection *conn, struct rds_ib_recv_work *recv, int prefill) { struct rds_ib_connection *ic = conn->c_transport_data; struct ib_sge *sge; int ret = -ENOMEM; gfp_t slab_mask = GFP_NOWAIT; gfp_t page_mask = GFP_NOWAIT; if (prefill) { slab_mask = GFP_KERNEL; page_mask = GFP_HIGHUSER; } if (!ic->i_cache_incs.ready) rds_ib_cache_xfer_to_ready(&ic->i_cache_incs); if (!ic->i_cache_frags.ready) rds_ib_cache_xfer_to_ready(&ic->i_cache_frags); /* * ibinc was taken from recv if recv contained the start of a message. * recvs that were continuations will still have this allocated. 
*/ if (!recv->r_ibinc) { recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask); if (!recv->r_ibinc) goto out; } WARN_ON(recv->r_frag); /* leak! */ recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask); if (!recv->r_frag) goto out; ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE); WARN_ON(ret != 1); sge = &recv->r_sge[0]; sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header); sge->length = sizeof(struct rds_header); sge = &recv->r_sge[1]; sge->addr = sg_dma_address(&recv->r_frag->f_sg); sge->length = sg_dma_len(&recv->r_frag->f_sg); ret = 0; out: return ret; } /* * This tries to allocate and post unused work requests after making sure that * they have all the allocations they need to queue received fragments into * sockets. * * -1 is returned if posting fails due to temporary resource exhaustion. */ void rds_ib_recv_refill(struct rds_connection *conn, int prefill) { struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_recv_work *recv; struct ib_recv_wr *failed_wr; unsigned int posted = 0; int ret = 0; u32 pos; while ((prefill || rds_conn_up(conn)) && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) { if (pos >= ic->i_recv_ring.w_nr) { printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", pos); break; } recv = &ic->i_recvs[pos]; ret = rds_ib_recv_refill_one(conn, recv, prefill); if (ret) { break; } /* XXX when can this fail? */ ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv, recv->r_ibinc, sg_page(&recv->r_frag->f_sg), (long) sg_dma_address(&recv->r_frag->f_sg), ret); if (ret) { rds_ib_conn_error(conn, "recv post on " "%pI4 returned %d, disconnecting and " "reconnecting\n", &conn->c_faddr, ret); break; } posted++; } /* We're doing flow control - update the window. 
*/ if (ic->i_flowctl && posted) rds_ib_advertise_credits(conn, posted); if (ret) rds_ib_ring_unalloc(&ic->i_recv_ring, 1); } /* * We want to recycle several types of recv allocations, like incs and frags. * To use this, the *_free() function passes in the ptr to a list_head within * the recyclee, as well as the cache to put it on. * * First, we put the memory on a percpu list. When this reaches a certain size, * We move it to an intermediate non-percpu list in a lockless manner, with some * xchg/compxchg wizardry. * * N.B. Instead of a list_head as the anchor, we use a single pointer, which can * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and * list_empty() will return true with one element is actually present. */ static void rds_ib_recv_cache_put(struct list_head *new_item, struct rds_ib_refill_cache *cache) { unsigned long flags; struct rds_ib_cache_head *chp; struct list_head *old; local_irq_save(flags); chp = per_cpu_ptr(cache->percpu, smp_processor_id()); if (!chp->first) INIT_LIST_HEAD(new_item); else /* put on front */ list_add_tail(new_item, chp->first); chp->first = new_item; chp->count++; if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT) goto end; /* * Return our per-cpu first list to the cache's xfer by atomically * grabbing the current xfer list, appending it to our per-cpu list, * and then atomically returning that entire list back to the * cache's xfer list as long as it's still empty. 
*/ do { old = xchg(&cache->xfer, NULL); if (old) list_splice_entire_tail(old, chp->first); old = cmpxchg(&cache->xfer, NULL, chp->first); } while (old); chp->first = NULL; chp->count = 0; end: local_irq_restore(flags); } static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache) { struct list_head *head = cache->ready; if (head) { if (!list_empty(head)) { cache->ready = head->next; list_del_init(head); } else cache->ready = NULL; } return head; } int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov, size_t size) { struct rds_ib_incoming *ibinc; struct rds_page_frag *frag; struct iovec *iov = first_iov; unsigned long to_copy; unsigned long frag_off = 0; unsigned long iov_off = 0; int copied = 0; int ret; u32 len; ibinc = container_of(inc, struct rds_ib_incoming, ii_inc); frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); len = be32_to_cpu(inc->i_hdr.h_len); while (copied < size && copied < len) { if (frag_off == RDS_FRAG_SIZE) { frag = list_entry(frag->f_item.next, struct rds_page_frag, f_item); frag_off = 0; } while (iov_off == iov->iov_len) { iov_off = 0; iov++; } to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off); to_copy = min_t(size_t, to_copy, size - copied); to_copy = min_t(unsigned long, to_copy, len - copied); rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag " "[%p, %u] + %lu\n", to_copy, iov->iov_base, iov->iov_len, iov_off, sg_page(&frag->f_sg), frag->f_sg.offset, frag_off); /* XXX needs + offset for multiple recvs per page */ ret = rds_page_copy_to_user(sg_page(&frag->f_sg), frag->f_sg.offset + frag_off, iov->iov_base + iov_off, to_copy); if (ret) { copied = ret; break; } iov_off += to_copy; frag_off += to_copy; copied += to_copy; } return copied; } /* ic starts out kzalloc()ed */ void rds_ib_recv_init_ack(struct rds_ib_connection *ic) { struct ib_send_wr *wr = &ic->i_ack_wr; struct ib_sge *sge = &ic->i_ack_sge; sge->addr = ic->i_ack_dma; sge->length = sizeof(struct 
rds_header); sge->lkey = ic->i_mr->lkey; wr->sg_list = sge; wr->num_sge = 1; wr->opcode = IB_WR_SEND; wr->wr_id = RDS_IB_ACK_WR_ID; wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; } /* * You'd think that with reliable IB connections you wouldn't need to ack * messages that have been received. The problem is that IB hardware generates * an ack message before it has DMAed the message into memory. This creates a * potential message loss if the HCA is disabled for any reason between when it * sends the ack and before the message is DMAed and processed. This is only a * potential issue if another HCA is available for fail-over. * * When the remote host receives our ack they'll free the sent message from * their send queue. To decrease the latency of this we always send an ack * immediately after we've received messages. * * For simplicity, we only have one ack in flight at a time. This puts * pressure on senders to have deep enough send queues to absorb the latency of * a single ack frame being in flight. This might not be good enough. * * This is implemented by have a long-lived send_wr and sge which point to a * statically allocated ack frame. This ack wr does not fall under the ring * accounting that the tx and rx wrs do. The QP attribute specifically makes * room for it beyond the ring size. Send completion notices its special * wr_id and avoids working with the ring in that case. 
*/ #ifndef KERNEL_HAS_ATOMIC64 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required) { unsigned long flags; spin_lock_irqsave(&ic->i_ack_lock, flags); ic->i_ack_next = seq; if (ack_required) set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); spin_unlock_irqrestore(&ic->i_ack_lock, flags); } static u64 rds_ib_get_ack(struct rds_ib_connection *ic) { unsigned long flags; u64 seq; clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); spin_lock_irqsave(&ic->i_ack_lock, flags); seq = ic->i_ack_next; spin_unlock_irqrestore(&ic->i_ack_lock, flags); return seq; } #else static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required) { atomic64_set(&ic->i_ack_next, seq); if (ack_required) { smp_mb__before_clear_bit(); set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); } } static u64 rds_ib_get_ack(struct rds_ib_connection *ic) { clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); smp_mb__after_clear_bit(); return atomic64_read(&ic->i_ack_next); } #endif static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits) { struct rds_header *hdr = ic->i_ack; struct ib_send_wr *failed_wr; u64 seq; int ret; seq = rds_ib_get_ack(ic); rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq); rds_message_populate_header(hdr, 0, 0, 0); hdr->h_ack = cpu_to_be64(seq); hdr->h_credit = adv_credits; rds_message_make_checksum(hdr); ic->i_ack_queued = jiffies; ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr); if (unlikely(ret)) { /* Failed to send. Release the WR, and * force another ACK. */ clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); rds_ib_stats_inc(s_ib_ack_send_failure); rds_ib_conn_error(ic->conn, "sending ack failed\n"); } else rds_ib_stats_inc(s_ib_ack_sent); } /* * There are 3 ways of getting acknowledgements to the peer: * 1. We call rds_ib_attempt_ack from the recv completion handler * to send an ACK-only frame. 
* However, there can be only one such frame in the send queue * at any time, so we may have to postpone it. * 2. When another (data) packet is transmitted while there's * an ACK in the queue, we piggyback the ACK sequence number * on the data packet. * 3. If the ACK WR is done sending, we get called from the * send queue completion handler, and check whether there's * another ACK pending (postponed because the WR was on the * queue). If so, we transmit it. * * We maintain 2 variables: * - i_ack_flags, which keeps track of whether the ACK WR * is currently in the send queue or not (IB_ACK_IN_FLIGHT) * - i_ack_next, which is the last sequence number we received * * Potentially, send queue and receive queue handlers can run concurrently. * It would be nice to not have to use a spinlock to synchronize things, * but the one problem that rules this out is that 64bit updates are * not atomic on all platforms. Things would be a lot simpler if * we had atomic64 or maybe cmpxchg64 everywhere. * * Reconnecting complicates this picture just slightly. When we * reconnect, we may be seeing duplicate packets. The peer * is retransmitting them, because it hasn't seen an ACK for * them. It is important that we ACK these. * * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with * this flag set *MUST* be acknowledged immediately. */ /* * When we get here, we're called from the recv queue handler. * Check whether we ought to transmit an ACK. */ void rds_ib_attempt_ack(struct rds_ib_connection *ic) { unsigned int adv_credits; if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags)) return; if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) { rds_ib_stats_inc(s_ib_ack_send_delayed); return; } /* Can we get a send credit? 
*/ if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) { rds_ib_stats_inc(s_ib_tx_throttle); clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); return; } clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); rds_ib_send_ack(ic, adv_credits); } /* * We get here from the send completion handler, when the * adapter tells us the ACK frame was sent. */ void rds_ib_ack_send_complete(struct rds_ib_connection *ic) { clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); rds_ib_attempt_ack(ic); } /* * This is called by the regular xmit code when it wants to piggyback * an ACK on an outgoing frame. */ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic) { if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags)) rds_ib_stats_inc(s_ib_ack_send_piggybacked); return rds_ib_get_ack(ic); } /* * It's kind of lame that we're copying from the posted receive pages into * long-lived bitmaps. We could have posted the bitmaps and rdma written into * them. But receiving new congestion bitmaps should be a *rare* event, so * hopefully we won't need to invest that complexity in making it more * efficient. By copying we can share a simpler core with TCP which has to * copy. */ static void rds_ib_cong_recv(struct rds_connection *conn, struct rds_ib_incoming *ibinc) { struct rds_cong_map *map; unsigned int map_off; unsigned int map_page; struct rds_page_frag *frag; unsigned long frag_off; unsigned long to_copy; unsigned long copied; uint64_t uncongested = 0; void *addr; /* catch completely corrupt packets */ if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES) return; map = conn->c_fcong; map_page = 0; map_off = 0; frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); frag_off = 0; copied = 0; while (copied < RDS_CONG_MAP_BYTES) { uint64_t *src, *dst; unsigned int k; to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); BUG_ON(to_copy & 7); /* Must be 64bit aligned. 
*/ addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0); src = addr + frag_off; dst = (void *)map->m_page_addrs[map_page] + map_off; for (k = 0; k < to_copy; k += 8) { /* Record ports that became uncongested, ie * bits that changed from 0 to 1. */ uncongested |= ~(*src) & *dst; *dst++ = *src++; } kunmap_atomic(addr, KM_SOFTIRQ0); copied += to_copy; map_off += to_copy; if (map_off == PAGE_SIZE) { map_off = 0; map_page++; } frag_off += to_copy; if (frag_off == RDS_FRAG_SIZE) { frag = list_entry(frag->f_item.next, struct rds_page_frag, f_item); frag_off = 0; } } /* the congestion map is in little endian order */ uncongested = le64_to_cpu(uncongested); rds_cong_map_updated(map, uncongested); } /* * Rings are posted with all the allocations they'll need to queue the * incoming message to the receiving socket so this can't fail. * All fragments start with a header, so we can make sure we're not receiving * garbage, and we can tell a small 8 byte fragment from an ACK frame. */ struct rds_ib_ack_state { u64 ack_next; u64 ack_recv; unsigned int ack_required:1; unsigned int ack_next_valid:1; unsigned int ack_recv_valid:1; }; static void rds_ib_process_recv(struct rds_connection *conn, struct rds_ib_recv_work *recv, u32 data_len, struct rds_ib_ack_state *state) { struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_incoming *ibinc = ic->i_ibinc; struct rds_header *ihdr, *hdr; /* XXX shut down the connection if port 0,0 are seen? */ rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv, data_len); if (data_len < sizeof(struct rds_header)) { rds_ib_conn_error(conn, "incoming message " "from %pI4 didn't inclue a " "header, disconnecting and " "reconnecting\n", &conn->c_faddr); return; } data_len -= sizeof(struct rds_header); ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs]; /* Validate the checksum. 
*/ if (!rds_message_verify_checksum(ihdr)) { rds_ib_conn_error(conn, "incoming message " "from %pI4 has corrupted header - " "forcing a reconnect\n", &conn->c_faddr); rds_stats_inc(s_recv_drop_bad_checksum); return; } /* Process the ACK sequence which comes with every packet */ state->ack_recv = be64_to_cpu(ihdr->h_ack); state->ack_recv_valid = 1; /* Process the credits update if there was one */ if (ihdr->h_credit) rds_ib_send_add_credits(conn, ihdr->h_credit); if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) { /* This is an ACK-only packet. The fact that it gets * special treatment here is that historically, ACKs * were rather special beasts. */ rds_ib_stats_inc(s_ib_ack_received); /* * Usually the frags make their way on to incs and are then freed as * the inc is freed. We don't go that route, so we have to drop the * page ref ourselves. We can't just leave the page on the recv * because that confuses the dma mapping of pages and each recv's use * of a partial page. * * FIXME: Fold this into the code path below. */ rds_ib_frag_free(ic, recv->r_frag); recv->r_frag = NULL; return; } /* * If we don't already have an inc on the connection then this * fragment has a header and starts a message.. copy its header * into the inc and save the inc so we can hang upcoming fragments * off its list. 
*/ if (!ibinc) { ibinc = recv->r_ibinc; recv->r_ibinc = NULL; ic->i_ibinc = ibinc; hdr = &ibinc->ii_inc.i_hdr; memcpy(hdr, ihdr, sizeof(*hdr)); ic->i_recv_data_rem = be32_to_cpu(hdr->h_len); rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc, ic->i_recv_data_rem, hdr->h_flags); } else { hdr = &ibinc->ii_inc.i_hdr; /* We can't just use memcmp here; fragments of a * single message may carry different ACKs */ if (hdr->h_sequence != ihdr->h_sequence || hdr->h_len != ihdr->h_len || hdr->h_sport != ihdr->h_sport || hdr->h_dport != ihdr->h_dport) { rds_ib_conn_error(conn, "fragment header mismatch; forcing reconnect\n"); return; } } list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags); recv->r_frag = NULL; if (ic->i_recv_data_rem > RDS_FRAG_SIZE) ic->i_recv_data_rem -= RDS_FRAG_SIZE; else { ic->i_recv_data_rem = 0; ic->i_ibinc = NULL; if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) rds_ib_cong_recv(conn, ibinc); else { rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr, &ibinc->ii_inc, GFP_ATOMIC, KM_SOFTIRQ0); state->ack_next = be64_to_cpu(hdr->h_sequence); state->ack_next_valid = 1; } /* Evaluate the ACK_REQUIRED flag *after* we received * the complete frame, and after bumping the next_rx * sequence. */ if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) { rds_stats_inc(s_recv_ack_required); state->ack_required = 1; } rds_inc_put(&ibinc->ii_inc); } } /* * Plucking the oldest entry from the ring can be done concurrently with * the thread refilling the ring. Each ring operation is protected by * spinlocks and the transient state of refilling doesn't change the * recording of which entry is oldest. * * This relies on IB only calling one cq comp_handler for each cq so that * there will only be one caller of rds_recv_incoming() per RDS connection. 
*/
/*
 * Completion-queue callback for the receive CQ.  Runs in hard-IRQ
 * context, so it only bumps a statistic and defers the actual CQ
 * draining to the per-connection tasklet.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

/*
 * Drain the receive CQ, handing each successful completion to
 * rds_ib_process_recv() and accumulating ack bookkeeping in *state.
 * The oldest ring entry always corresponds to the completed work
 * request (see the comment above about single-threaded CQ handling).
 */
static inline void rds_poll_cq(struct rds_ib_connection *ic,
			       struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_ib_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status,
			 rds_ib_wc_status_str(wc.status), wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_rx_cq_event);

		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

		/* DMA is done with the fragment; unmap before the CPU
		 * touches it. */
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
				DMA_FROM_DEVICE);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (wc.status == IB_WC_SUCCESS) {
			rds_ib_process_recv(conn, recv, wc.byte_len, state);
		} else {
			/* We expect errors as the qp is drained during
			 * shutdown; only complain if the connection was
			 * supposed to be up. */
			if (rds_conn_up(conn) || rds_conn_connecting(conn))
				rds_ib_conn_error(conn, "recv completion on %pI4 had "
						  "status %u (%s), disconnecting and "
						  "reconnecting\n", &conn->c_faddr,
						  wc.status,
						  rds_ib_wc_status_str(wc.status));
		}

		/*
		 * It's very important that we only free this ring entry if we've
		 * truly freed the resources allocated to the entry.  The
		 * refilling path can leak if we don't.
		 */
		rds_ib_ring_free(&ic->i_recv_ring, 1);
	}
}

/*
 * Tasklet body: drain the CQ, re-arm it, then drain again so that
 * completions which arrived between the first drain and the re-arm are
 * picked up.  Afterwards apply the ack state collected while polling
 * and top up the receive ring if it is running low.
 */
void rds_ib_recv_tasklet_fn(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_ack_state state = { 0, };

	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		/* The peer acked up to state.ack_recv; retire those sends. */
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring))
		rds_ib_recv_refill(conn, 0);
}

/*
 * Transport "recv" hook.  Incoming data is delivered from the CQ
 * tasklet, so all that is left to do here is opportunistically send an
 * ack if the connection is up.  Always returns 0.
 */
int rds_ib_recv(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	return ret;
}

/*
 * Module init: size the receive-allocation sysctl limit from available
 * RAM and create the two slab caches used by the receive path.
 * Returns 0 on success or -ENOMEM if either cache cannot be created
 * (the first cache is destroyed again if the second fails).
 */
int rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, SLAB_HWCACHE_ALIGN, NULL);
	if (!rds_ib_frag_slab)
		kmem_cache_destroy(rds_ib_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}

/* Module exit: tear down the receive-path slab caches. */
void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}
gpl-2.0
F4uzan/f4kernel-u0
arch/mips/lantiq/irq.c
4371
8314
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2010 John Crispin <blogic@openwrt.org> * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com> */ #include <linux/interrupt.h> #include <linux/ioport.h> #include <asm/bootinfo.h> #include <asm/irq_cpu.h> #include <lantiq_soc.h> #include <irq.h> /* register definitions */ #define LTQ_ICU_IM0_ISR 0x0000 #define LTQ_ICU_IM0_IER 0x0008 #define LTQ_ICU_IM0_IOSR 0x0010 #define LTQ_ICU_IM0_IRSR 0x0018 #define LTQ_ICU_IM0_IMR 0x0020 #define LTQ_ICU_IM1_ISR 0x0028 #define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR) #define LTQ_EIU_EXIN_C 0x0000 #define LTQ_EIU_EXIN_INIC 0x0004 #define LTQ_EIU_EXIN_INEN 0x000C /* irq numbers used by the external interrupt unit (EIU) */ #define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30) #define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31) #define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26) #define LTQ_EIU_IR3 INT_NUM_IM1_IRL0 #define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) #define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) #define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) #define MAX_EIU 6 /* irqs generated by device attached to the EBU need to be acked in * a special manner */ #define LTQ_ICU_EBU_IRQ 22 #define ltq_icu_w32(x, y) ltq_w32((x), ltq_icu_membase + (y)) #define ltq_icu_r32(x) ltq_r32(ltq_icu_membase + (x)) #define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y)) #define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x)) static unsigned short ltq_eiu_irq[MAX_EIU] = { LTQ_EIU_IR0, LTQ_EIU_IR1, LTQ_EIU_IR2, LTQ_EIU_IR3, LTQ_EIU_IR4, LTQ_EIU_IR5, }; static struct resource ltq_icu_resource = { .name = "icu", .start = LTQ_ICU_BASE_ADDR, .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1, .flags = IORESOURCE_MEM, }; static struct resource ltq_eiu_resource = { .name = "eiu", .start = LTQ_EIU_BASE_ADDR, .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1, .flags = IORESOURCE_MEM, }; 
static void __iomem *ltq_icu_membase; static void __iomem *ltq_eiu_membase; void ltq_disable_irq(struct irq_data *d) { u32 ier = LTQ_ICU_IM0_IER; int irq_nr = d->irq - INT_NUM_IRQ0; ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); irq_nr %= INT_NUM_IM_OFFSET; ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); } void ltq_mask_and_ack_irq(struct irq_data *d) { u32 ier = LTQ_ICU_IM0_IER; u32 isr = LTQ_ICU_IM0_ISR; int irq_nr = d->irq - INT_NUM_IRQ0; ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); irq_nr %= INT_NUM_IM_OFFSET; ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); ltq_icu_w32((1 << irq_nr), isr); } static void ltq_ack_irq(struct irq_data *d) { u32 isr = LTQ_ICU_IM0_ISR; int irq_nr = d->irq - INT_NUM_IRQ0; isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); irq_nr %= INT_NUM_IM_OFFSET; ltq_icu_w32((1 << irq_nr), isr); } void ltq_enable_irq(struct irq_data *d) { u32 ier = LTQ_ICU_IM0_IER; int irq_nr = d->irq - INT_NUM_IRQ0; ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); irq_nr %= INT_NUM_IM_OFFSET; ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier); } static unsigned int ltq_startup_eiu_irq(struct irq_data *d) { int i; ltq_enable_irq(d); for (i = 0; i < MAX_EIU; i++) { if (d->irq == ltq_eiu_irq[i]) { /* low level - we should really handle set_type */ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); /* clear all pending */ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i), LTQ_EIU_EXIN_INIC); /* enable */ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i), LTQ_EIU_EXIN_INEN); break; } } return 0; } static void ltq_shutdown_eiu_irq(struct irq_data *d) { int i; ltq_disable_irq(d); for (i = 0; i < MAX_EIU; i++) { if (d->irq == ltq_eiu_irq[i]) { /* disable */ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), LTQ_EIU_EXIN_INEN); break; } } } static struct irq_chip ltq_irq_type = { "icu", .irq_enable = ltq_enable_irq, .irq_disable = 
ltq_disable_irq, .irq_unmask = ltq_enable_irq, .irq_ack = ltq_ack_irq, .irq_mask = ltq_disable_irq, .irq_mask_ack = ltq_mask_and_ack_irq, }; static struct irq_chip ltq_eiu_type = { "eiu", .irq_startup = ltq_startup_eiu_irq, .irq_shutdown = ltq_shutdown_eiu_irq, .irq_enable = ltq_enable_irq, .irq_disable = ltq_disable_irq, .irq_unmask = ltq_enable_irq, .irq_ack = ltq_ack_irq, .irq_mask = ltq_disable_irq, .irq_mask_ack = ltq_mask_and_ack_irq, }; static void ltq_hw_irqdispatch(int module) { u32 irq; irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET)); if (irq == 0) return; /* silicon bug causes only the msb set to 1 to be valid. all * other bits might be bogus */ irq = __fls(irq); do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module)); /* if this is a EBU irq, we need to ack it or get a deadlock */ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0)) ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, LTQ_EBU_PCC_ISTAT); } #define DEFINE_HWx_IRQDISPATCH(x) \ static void ltq_hw ## x ## _irqdispatch(void) \ { \ ltq_hw_irqdispatch(x); \ } DEFINE_HWx_IRQDISPATCH(0) DEFINE_HWx_IRQDISPATCH(1) DEFINE_HWx_IRQDISPATCH(2) DEFINE_HWx_IRQDISPATCH(3) DEFINE_HWx_IRQDISPATCH(4) static void ltq_hw5_irqdispatch(void) { do_IRQ(MIPS_CPU_TIMER_IRQ); } asmlinkage void plat_irq_dispatch(void) { unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; unsigned int i; if (pending & CAUSEF_IP7) { do_IRQ(MIPS_CPU_TIMER_IRQ); goto out; } else { for (i = 0; i < 5; i++) { if (pending & (CAUSEF_IP2 << i)) { ltq_hw_irqdispatch(i); goto out; } } } pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status()); out: return; } static struct irqaction cascade = { .handler = no_action, .name = "cascade", }; void __init arch_init_irq(void) { int i; if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0) panic("Failed to insert icu memory"); if (request_mem_region(ltq_icu_resource.start, resource_size(&ltq_icu_resource), "icu") < 0) panic("Failed to request icu memory"); 
ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start, resource_size(&ltq_icu_resource)); if (!ltq_icu_membase) panic("Failed to remap icu memory"); if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0) panic("Failed to insert eiu memory"); if (request_mem_region(ltq_eiu_resource.start, resource_size(&ltq_eiu_resource), "eiu") < 0) panic("Failed to request eiu memory"); ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start, resource_size(&ltq_eiu_resource)); if (!ltq_eiu_membase) panic("Failed to remap eiu memory"); /* make sure all irqs are turned off by default */ for (i = 0; i < 5; i++) ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET)); /* clear all possibly pending interrupts */ ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET)); mips_cpu_irq_init(); for (i = 2; i <= 6; i++) setup_irq(i, &cascade); if (cpu_has_vint) { pr_info("Setting up vectored interrupts\n"); set_vi_handler(2, ltq_hw0_irqdispatch); set_vi_handler(3, ltq_hw1_irqdispatch); set_vi_handler(4, ltq_hw2_irqdispatch); set_vi_handler(5, ltq_hw3_irqdispatch); set_vi_handler(6, ltq_hw4_irqdispatch); set_vi_handler(7, ltq_hw5_irqdispatch); } for (i = INT_NUM_IRQ0; i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++) if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) || (i == LTQ_EIU_IR2)) irq_set_chip_and_handler(i, &ltq_eiu_type, handle_level_irq); /* EIU3-5 only exist on ar9 and vr9 */ else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) || (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9())) irq_set_chip_and_handler(i, &ltq_eiu_type, handle_level_irq); else irq_set_chip_and_handler(i, &ltq_irq_type, handle_level_irq); #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); #else set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); #endif } unsigned int __cpuinit get_c0_compare_int(void) { return CP0_LEGACY_COMPARE_IRQ; }
gpl-2.0
TeamEOS/kernel_htc_msm8960
arch/powerpc/sysdev/qe_lib/usb.c
4883
1720
/* * QE USB routines * * Copyright (c) Freescale Semicondutor, Inc. 2006. * Shlomi Gridish <gridish@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * Copyright (c) MontaVista Software, Inc. 2008. * Anton Vorontsov <avorontsov@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/io.h> #include <asm/immap_qe.h> #include <asm/qe.h> int qe_usb_clock_set(enum qe_clock clk, int rate) { struct qe_mux __iomem *mux = &qe_immr->qmx; unsigned long flags; u32 val; switch (clk) { case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break; case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break; case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break; case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break; case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break; case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break; case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break; case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break; case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break; case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break; default: pr_err("%s: requested unknown clock %d\n", __func__, clk); return -EINVAL; } if (qe_clock_is_brg(clk)) qe_setbrg(clk, rate, 1); spin_lock_irqsave(&cmxgcr_lock, flags); clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val); spin_unlock_irqrestore(&cmxgcr_lock, flags); return 0; } EXPORT_SYMBOL(qe_usb_clock_set);
gpl-2.0
xdajog/kernel_fx3q_aosp
drivers/rtc/rtc-sh.c
5139
20627
/* * SuperH On-Chip RTC Support * * Copyright (C) 2006 - 2009 Paul Mundt * Copyright (C) 2006 Jamie Lenehan * Copyright (C) 2008 Angelo Castello * * Based on the old arch/sh/kernel/cpu/rtc.c by: * * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/log2.h> #include <linux/clk.h> #include <linux/slab.h> #include <asm/rtc.h> #define DRV_NAME "sh-rtc" #define DRV_VERSION "0.2.3" #define RTC_REG(r) ((r) * rtc_reg_size) #define R64CNT RTC_REG(0) #define RSECCNT RTC_REG(1) /* RTC sec */ #define RMINCNT RTC_REG(2) /* RTC min */ #define RHRCNT RTC_REG(3) /* RTC hour */ #define RWKCNT RTC_REG(4) /* RTC week */ #define RDAYCNT RTC_REG(5) /* RTC day */ #define RMONCNT RTC_REG(6) /* RTC month */ #define RYRCNT RTC_REG(7) /* RTC year */ #define RSECAR RTC_REG(8) /* ALARM sec */ #define RMINAR RTC_REG(9) /* ALARM min */ #define RHRAR RTC_REG(10) /* ALARM hour */ #define RWKAR RTC_REG(11) /* ALARM week */ #define RDAYAR RTC_REG(12) /* ALARM day */ #define RMONAR RTC_REG(13) /* ALARM month */ #define RCR1 RTC_REG(14) /* Control */ #define RCR2 RTC_REG(15) /* Control */ /* * Note on RYRAR and RCR3: Up until this point most of the register * definitions are consistent across all of the available parts. However, * the placement of the optional RYRAR and RCR3 (the RYRAR control * register used to control RYRCNT/RYRAR compare) varies considerably * across various parts, occasionally being mapped in to a completely * unrelated address space. 
For proper RYRAR support a separate resource * would have to be handed off, but as this is purely optional in * practice, we simply opt not to support it, thereby keeping the code * quite a bit more simplified. */ /* ALARM Bits - or with BCD encoded value */ #define AR_ENB 0x80 /* Enable for alarm cmp */ /* Period Bits */ #define PF_HP 0x100 /* Enable Half Period to support 8,32,128Hz */ #define PF_COUNT 0x200 /* Half periodic counter */ #define PF_OXS 0x400 /* Periodic One x Second */ #define PF_KOU 0x800 /* Kernel or User periodic request 1=kernel */ #define PF_MASK 0xf00 /* RCR1 Bits */ #define RCR1_CF 0x80 /* Carry Flag */ #define RCR1_CIE 0x10 /* Carry Interrupt Enable */ #define RCR1_AIE 0x08 /* Alarm Interrupt Enable */ #define RCR1_AF 0x01 /* Alarm Flag */ /* RCR2 Bits */ #define RCR2_PEF 0x80 /* PEriodic interrupt Flag */ #define RCR2_PESMASK 0x70 /* Periodic interrupt Set */ #define RCR2_RTCEN 0x08 /* ENable RTC */ #define RCR2_ADJ 0x04 /* ADJustment (30-second) */ #define RCR2_RESET 0x02 /* Reset bit */ #define RCR2_START 0x01 /* Start bit */ struct sh_rtc { void __iomem *regbase; unsigned long regsize; struct resource *res; int alarm_irq; int periodic_irq; int carry_irq; struct clk *clk; struct rtc_device *rtc_dev; spinlock_t lock; unsigned long capabilities; /* See asm/rtc.h for cap bits */ unsigned short periodic_freq; }; static int __sh_rtc_interrupt(struct sh_rtc *rtc) { unsigned int tmp, pending; tmp = readb(rtc->regbase + RCR1); pending = tmp & RCR1_CF; tmp &= ~RCR1_CF; writeb(tmp, rtc->regbase + RCR1); /* Users have requested One x Second IRQ */ if (pending && rtc->periodic_freq & PF_OXS) rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF); return pending; } static int __sh_rtc_alarm(struct sh_rtc *rtc) { unsigned int tmp, pending; tmp = readb(rtc->regbase + RCR1); pending = tmp & RCR1_AF; tmp &= ~(RCR1_AF | RCR1_AIE); writeb(tmp, rtc->regbase + RCR1); if (pending) rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); return pending; } static int 
__sh_rtc_periodic(struct sh_rtc *rtc) { struct rtc_device *rtc_dev = rtc->rtc_dev; struct rtc_task *irq_task; unsigned int tmp, pending; tmp = readb(rtc->regbase + RCR2); pending = tmp & RCR2_PEF; tmp &= ~RCR2_PEF; writeb(tmp, rtc->regbase + RCR2); if (!pending) return 0; /* Half period enabled than one skipped and the next notified */ if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT)) rtc->periodic_freq &= ~PF_COUNT; else { if (rtc->periodic_freq & PF_HP) rtc->periodic_freq |= PF_COUNT; if (rtc->periodic_freq & PF_KOU) { spin_lock(&rtc_dev->irq_task_lock); irq_task = rtc_dev->irq_task; if (irq_task) irq_task->func(irq_task->private_data); spin_unlock(&rtc_dev->irq_task_lock); } else rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF); } return pending; } static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id) { struct sh_rtc *rtc = dev_id; int ret; spin_lock(&rtc->lock); ret = __sh_rtc_interrupt(rtc); spin_unlock(&rtc->lock); return IRQ_RETVAL(ret); } static irqreturn_t sh_rtc_alarm(int irq, void *dev_id) { struct sh_rtc *rtc = dev_id; int ret; spin_lock(&rtc->lock); ret = __sh_rtc_alarm(rtc); spin_unlock(&rtc->lock); return IRQ_RETVAL(ret); } static irqreturn_t sh_rtc_periodic(int irq, void *dev_id) { struct sh_rtc *rtc = dev_id; int ret; spin_lock(&rtc->lock); ret = __sh_rtc_periodic(rtc); spin_unlock(&rtc->lock); return IRQ_RETVAL(ret); } static irqreturn_t sh_rtc_shared(int irq, void *dev_id) { struct sh_rtc *rtc = dev_id; int ret; spin_lock(&rtc->lock); ret = __sh_rtc_interrupt(rtc); ret |= __sh_rtc_alarm(rtc); ret |= __sh_rtc_periodic(rtc); spin_unlock(&rtc->lock); return IRQ_RETVAL(ret); } static int sh_rtc_irq_set_state(struct device *dev, int enable) { struct sh_rtc *rtc = dev_get_drvdata(dev); unsigned int tmp; spin_lock_irq(&rtc->lock); tmp = readb(rtc->regbase + RCR2); if (enable) { rtc->periodic_freq |= PF_KOU; tmp &= ~RCR2_PEF; /* Clear PES bit */ tmp |= (rtc->periodic_freq & ~PF_HP); /* Set PES2-0 */ } else { rtc->periodic_freq 
&= ~PF_KOU; tmp &= ~(RCR2_PESMASK | RCR2_PEF); } writeb(tmp, rtc->regbase + RCR2); spin_unlock_irq(&rtc->lock); return 0; } static int sh_rtc_irq_set_freq(struct device *dev, int freq) { struct sh_rtc *rtc = dev_get_drvdata(dev); int tmp, ret = 0; spin_lock_irq(&rtc->lock); tmp = rtc->periodic_freq & PF_MASK; switch (freq) { case 0: rtc->periodic_freq = 0x00; break; case 1: rtc->periodic_freq = 0x60; break; case 2: rtc->periodic_freq = 0x50; break; case 4: rtc->periodic_freq = 0x40; break; case 8: rtc->periodic_freq = 0x30 | PF_HP; break; case 16: rtc->periodic_freq = 0x30; break; case 32: rtc->periodic_freq = 0x20 | PF_HP; break; case 64: rtc->periodic_freq = 0x20; break; case 128: rtc->periodic_freq = 0x10 | PF_HP; break; case 256: rtc->periodic_freq = 0x10; break; default: ret = -ENOTSUPP; } if (ret == 0) rtc->periodic_freq |= tmp; spin_unlock_irq(&rtc->lock); return ret; } static inline void sh_rtc_setaie(struct device *dev, unsigned int enable) { struct sh_rtc *rtc = dev_get_drvdata(dev); unsigned int tmp; spin_lock_irq(&rtc->lock); tmp = readb(rtc->regbase + RCR1); if (enable) tmp |= RCR1_AIE; else tmp &= ~RCR1_AIE; writeb(tmp, rtc->regbase + RCR1); spin_unlock_irq(&rtc->lock); } static int sh_rtc_proc(struct device *dev, struct seq_file *seq) { struct sh_rtc *rtc = dev_get_drvdata(dev); unsigned int tmp; tmp = readb(rtc->regbase + RCR1); seq_printf(seq, "carry_IRQ\t: %s\n", (tmp & RCR1_CIE) ? "yes" : "no"); tmp = readb(rtc->regbase + RCR2); seq_printf(seq, "periodic_IRQ\t: %s\n", (tmp & RCR2_PESMASK) ? 
"yes" : "no"); return 0; } static inline void sh_rtc_setcie(struct device *dev, unsigned int enable) { struct sh_rtc *rtc = dev_get_drvdata(dev); unsigned int tmp; spin_lock_irq(&rtc->lock); tmp = readb(rtc->regbase + RCR1); if (!enable) tmp &= ~RCR1_CIE; else tmp |= RCR1_CIE; writeb(tmp, rtc->regbase + RCR1); spin_unlock_irq(&rtc->lock); } static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { sh_rtc_setaie(dev, enabled); return 0; } static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct sh_rtc *rtc = platform_get_drvdata(pdev); unsigned int sec128, sec2, yr, yr100, cf_bit; do { unsigned int tmp; spin_lock_irq(&rtc->lock); tmp = readb(rtc->regbase + RCR1); tmp &= ~RCR1_CF; /* Clear CF-bit */ tmp |= RCR1_CIE; writeb(tmp, rtc->regbase + RCR1); sec128 = readb(rtc->regbase + R64CNT); tm->tm_sec = bcd2bin(readb(rtc->regbase + RSECCNT)); tm->tm_min = bcd2bin(readb(rtc->regbase + RMINCNT)); tm->tm_hour = bcd2bin(readb(rtc->regbase + RHRCNT)); tm->tm_wday = bcd2bin(readb(rtc->regbase + RWKCNT)); tm->tm_mday = bcd2bin(readb(rtc->regbase + RDAYCNT)); tm->tm_mon = bcd2bin(readb(rtc->regbase + RMONCNT)) - 1; if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) { yr = readw(rtc->regbase + RYRCNT); yr100 = bcd2bin(yr >> 8); yr &= 0xff; } else { yr = readb(rtc->regbase + RYRCNT); yr100 = bcd2bin((yr == 0x99) ? 
0x19 : 0x20); } tm->tm_year = (yr100 * 100 + bcd2bin(yr)) - 1900; sec2 = readb(rtc->regbase + R64CNT); cf_bit = readb(rtc->regbase + RCR1) & RCR1_CF; spin_unlock_irq(&rtc->lock); } while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0); #if RTC_BIT_INVERTED != 0 if ((sec128 & RTC_BIT_INVERTED)) tm->tm_sec--; #endif /* only keep the carry interrupt enabled if UIE is on */ if (!(rtc->periodic_freq & PF_OXS)) sh_rtc_setcie(dev, 0); dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, " "mday=%d, mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday); return rtc_valid_tm(tm); } static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct platform_device *pdev = to_platform_device(dev); struct sh_rtc *rtc = platform_get_drvdata(pdev); unsigned int tmp; int year; spin_lock_irq(&rtc->lock); /* Reset pre-scaler & stop RTC */ tmp = readb(rtc->regbase + RCR2); tmp |= RCR2_RESET; tmp &= ~RCR2_START; writeb(tmp, rtc->regbase + RCR2); writeb(bin2bcd(tm->tm_sec), rtc->regbase + RSECCNT); writeb(bin2bcd(tm->tm_min), rtc->regbase + RMINCNT); writeb(bin2bcd(tm->tm_hour), rtc->regbase + RHRCNT); writeb(bin2bcd(tm->tm_wday), rtc->regbase + RWKCNT); writeb(bin2bcd(tm->tm_mday), rtc->regbase + RDAYCNT); writeb(bin2bcd(tm->tm_mon + 1), rtc->regbase + RMONCNT); if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) { year = (bin2bcd((tm->tm_year + 1900) / 100) << 8) | bin2bcd(tm->tm_year % 100); writew(year, rtc->regbase + RYRCNT); } else { year = tm->tm_year % 100; writeb(bin2bcd(year), rtc->regbase + RYRCNT); } /* Start RTC */ tmp = readb(rtc->regbase + RCR2); tmp &= ~RCR2_RESET; tmp |= RCR2_RTCEN | RCR2_START; writeb(tmp, rtc->regbase + RCR2); spin_unlock_irq(&rtc->lock); return 0; } static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) { unsigned int byte; int value = 0xff; /* return 0xff for ignored values */ byte = readb(rtc->regbase + reg_off); if (byte & AR_ENB) { byte 
&= ~AR_ENB; /* strip the enable bit */ value = bcd2bin(byte); } return value; } static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) { struct platform_device *pdev = to_platform_device(dev); struct sh_rtc *rtc = platform_get_drvdata(pdev); struct rtc_time *tm = &wkalrm->time; spin_lock_irq(&rtc->lock); tm->tm_sec = sh_rtc_read_alarm_value(rtc, RSECAR); tm->tm_min = sh_rtc_read_alarm_value(rtc, RMINAR); tm->tm_hour = sh_rtc_read_alarm_value(rtc, RHRAR); tm->tm_wday = sh_rtc_read_alarm_value(rtc, RWKAR); tm->tm_mday = sh_rtc_read_alarm_value(rtc, RDAYAR); tm->tm_mon = sh_rtc_read_alarm_value(rtc, RMONAR); if (tm->tm_mon > 0) tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */ tm->tm_year = 0xffff; wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0; spin_unlock_irq(&rtc->lock); return 0; } static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc, int value, int reg_off) { /* < 0 for a value that is ignored */ if (value < 0) writeb(0, rtc->regbase + reg_off); else writeb(bin2bcd(value) | AR_ENB, rtc->regbase + reg_off); } static int sh_rtc_check_alarm(struct rtc_time *tm) { /* * The original rtc says anything > 0xc0 is "don't care" or "match * all" - most users use 0xff but rtc-dev uses -1 for the same thing. * The original rtc doesn't support years - some things use -1 and * some 0xffff. We use -1 to make out tests easier. 
*/ if (tm->tm_year == 0xffff) tm->tm_year = -1; if (tm->tm_mon >= 0xff) tm->tm_mon = -1; if (tm->tm_mday >= 0xff) tm->tm_mday = -1; if (tm->tm_wday >= 0xff) tm->tm_wday = -1; if (tm->tm_hour >= 0xff) tm->tm_hour = -1; if (tm->tm_min >= 0xff) tm->tm_min = -1; if (tm->tm_sec >= 0xff) tm->tm_sec = -1; if (tm->tm_year > 9999 || tm->tm_mon >= 12 || tm->tm_mday == 0 || tm->tm_mday >= 32 || tm->tm_wday >= 7 || tm->tm_hour >= 24 || tm->tm_min >= 60 || tm->tm_sec >= 60) return -EINVAL; return 0; } static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) { struct platform_device *pdev = to_platform_device(dev); struct sh_rtc *rtc = platform_get_drvdata(pdev); unsigned int rcr1; struct rtc_time *tm = &wkalrm->time; int mon, err; err = sh_rtc_check_alarm(tm); if (unlikely(err < 0)) return err; spin_lock_irq(&rtc->lock); /* disable alarm interrupt and clear the alarm flag */ rcr1 = readb(rtc->regbase + RCR1); rcr1 &= ~(RCR1_AF | RCR1_AIE); writeb(rcr1, rtc->regbase + RCR1); /* set alarm time */ sh_rtc_write_alarm_value(rtc, tm->tm_sec, RSECAR); sh_rtc_write_alarm_value(rtc, tm->tm_min, RMINAR); sh_rtc_write_alarm_value(rtc, tm->tm_hour, RHRAR); sh_rtc_write_alarm_value(rtc, tm->tm_wday, RWKAR); sh_rtc_write_alarm_value(rtc, tm->tm_mday, RDAYAR); mon = tm->tm_mon; if (mon >= 0) mon += 1; sh_rtc_write_alarm_value(rtc, mon, RMONAR); if (wkalrm->enabled) { rcr1 |= RCR1_AIE; writeb(rcr1, rtc->regbase + RCR1); } spin_unlock_irq(&rtc->lock); return 0; } static struct rtc_class_ops sh_rtc_ops = { .read_time = sh_rtc_read_time, .set_time = sh_rtc_set_time, .read_alarm = sh_rtc_read_alarm, .set_alarm = sh_rtc_set_alarm, .proc = sh_rtc_proc, .alarm_irq_enable = sh_rtc_alarm_irq_enable, }; static int __init sh_rtc_probe(struct platform_device *pdev) { struct sh_rtc *rtc; struct resource *res; struct rtc_time r; char clk_name[6]; int clk_id, ret; rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL); if (unlikely(!rtc)) return -ENOMEM; spin_lock_init(&rtc->lock); /* get 
periodic/carry/alarm irqs */ ret = platform_get_irq(pdev, 0); if (unlikely(ret <= 0)) { ret = -ENOENT; dev_err(&pdev->dev, "No IRQ resource\n"); goto err_badres; } rtc->periodic_irq = ret; rtc->carry_irq = platform_get_irq(pdev, 1); rtc->alarm_irq = platform_get_irq(pdev, 2); res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (unlikely(res == NULL)) { ret = -ENOENT; dev_err(&pdev->dev, "No IO resource\n"); goto err_badres; } rtc->regsize = resource_size(res); rtc->res = request_mem_region(res->start, rtc->regsize, pdev->name); if (unlikely(!rtc->res)) { ret = -EBUSY; goto err_badres; } rtc->regbase = ioremap_nocache(rtc->res->start, rtc->regsize); if (unlikely(!rtc->regbase)) { ret = -EINVAL; goto err_badmap; } clk_id = pdev->id; /* With a single device, the clock id is still "rtc0" */ if (clk_id < 0) clk_id = 0; snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id); rtc->clk = clk_get(&pdev->dev, clk_name); if (IS_ERR(rtc->clk)) { /* * No error handling for rtc->clk intentionally, not all * platforms will have a unique clock for the RTC, and * the clk API can handle the struct clk pointer being * NULL. */ rtc->clk = NULL; } clk_enable(rtc->clk); rtc->capabilities = RTC_DEF_CAPABILITIES; if (pdev->dev.platform_data) { struct sh_rtc_platform_info *pinfo = pdev->dev.platform_data; /* * Some CPUs have special capabilities in addition to the * default set. Add those in here. 
*/ rtc->capabilities |= pinfo->capabilities; } if (rtc->carry_irq <= 0) { /* register shared periodic/carry/alarm irq */ ret = request_irq(rtc->periodic_irq, sh_rtc_shared, 0, "sh-rtc", rtc); if (unlikely(ret)) { dev_err(&pdev->dev, "request IRQ failed with %d, IRQ %d\n", ret, rtc->periodic_irq); goto err_unmap; } } else { /* register periodic/carry/alarm irqs */ ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, 0, "sh-rtc period", rtc); if (unlikely(ret)) { dev_err(&pdev->dev, "request period IRQ failed with %d, IRQ %d\n", ret, rtc->periodic_irq); goto err_unmap; } ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, 0, "sh-rtc carry", rtc); if (unlikely(ret)) { dev_err(&pdev->dev, "request carry IRQ failed with %d, IRQ %d\n", ret, rtc->carry_irq); free_irq(rtc->periodic_irq, rtc); goto err_unmap; } ret = request_irq(rtc->alarm_irq, sh_rtc_alarm, 0, "sh-rtc alarm", rtc); if (unlikely(ret)) { dev_err(&pdev->dev, "request alarm IRQ failed with %d, IRQ %d\n", ret, rtc->alarm_irq); free_irq(rtc->carry_irq, rtc); free_irq(rtc->periodic_irq, rtc); goto err_unmap; } } platform_set_drvdata(pdev, rtc); /* everything disabled by default */ sh_rtc_irq_set_freq(&pdev->dev, 0); sh_rtc_irq_set_state(&pdev->dev, 0); sh_rtc_setaie(&pdev->dev, 0); sh_rtc_setcie(&pdev->dev, 0); rtc->rtc_dev = rtc_device_register("sh", &pdev->dev, &sh_rtc_ops, THIS_MODULE); if (IS_ERR(rtc->rtc_dev)) { ret = PTR_ERR(rtc->rtc_dev); free_irq(rtc->periodic_irq, rtc); free_irq(rtc->carry_irq, rtc); free_irq(rtc->alarm_irq, rtc); goto err_unmap; } rtc->rtc_dev->max_user_freq = 256; /* reset rtc to epoch 0 if time is invalid */ if (rtc_read_time(rtc->rtc_dev, &r) < 0) { rtc_time_to_tm(0, &r); rtc_set_time(rtc->rtc_dev, &r); } device_init_wakeup(&pdev->dev, 1); return 0; err_unmap: clk_disable(rtc->clk); clk_put(rtc->clk); iounmap(rtc->regbase); err_badmap: release_mem_region(rtc->res->start, rtc->regsize); err_badres: kfree(rtc); return ret; } static int __exit sh_rtc_remove(struct platform_device 
*pdev) { struct sh_rtc *rtc = platform_get_drvdata(pdev); rtc_device_unregister(rtc->rtc_dev); sh_rtc_irq_set_state(&pdev->dev, 0); sh_rtc_setaie(&pdev->dev, 0); sh_rtc_setcie(&pdev->dev, 0); free_irq(rtc->periodic_irq, rtc); if (rtc->carry_irq > 0) { free_irq(rtc->carry_irq, rtc); free_irq(rtc->alarm_irq, rtc); } iounmap(rtc->regbase); release_mem_region(rtc->res->start, rtc->regsize); clk_disable(rtc->clk); clk_put(rtc->clk); platform_set_drvdata(pdev, NULL); kfree(rtc); return 0; } static void sh_rtc_set_irq_wake(struct device *dev, int enabled) { struct platform_device *pdev = to_platform_device(dev); struct sh_rtc *rtc = platform_get_drvdata(pdev); irq_set_irq_wake(rtc->periodic_irq, enabled); if (rtc->carry_irq > 0) { irq_set_irq_wake(rtc->carry_irq, enabled); irq_set_irq_wake(rtc->alarm_irq, enabled); } } static int sh_rtc_suspend(struct device *dev) { if (device_may_wakeup(dev)) sh_rtc_set_irq_wake(dev, 1); return 0; } static int sh_rtc_resume(struct device *dev) { if (device_may_wakeup(dev)) sh_rtc_set_irq_wake(dev, 0); return 0; } static const struct dev_pm_ops sh_rtc_dev_pm_ops = { .suspend = sh_rtc_suspend, .resume = sh_rtc_resume, }; static struct platform_driver sh_rtc_platform_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .pm = &sh_rtc_dev_pm_ops, }, .remove = __exit_p(sh_rtc_remove), }; static int __init sh_rtc_init(void) { return platform_driver_probe(&sh_rtc_platform_driver, sh_rtc_probe); } static void __exit sh_rtc_exit(void) { platform_driver_unregister(&sh_rtc_platform_driver); } module_init(sh_rtc_init); module_exit(sh_rtc_exit); MODULE_DESCRIPTION("SuperH on-chip RTC driver"); MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, " "Jamie Lenehan <lenehan@twibble.org>, " "Angelo Castello <angelo.castello@st.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
chruck/cpsc8220
linux-4.3.3/arch/powerpc/sysdev/cpm2.c
12819
8753
/* * General Purpose functions for the global management of the * 8260 Communication Processor Module. * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com> * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com) * 2.3.99 Updates * * 2006 (c) MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * Merged to arch/powerpc from arch/ppc/syslib/cpm2_common.c * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ /* * * In addition to the individual control of the communication * channels, there are a few functions that globally affect the * communication processor. * * Buffer descriptors must be allocated from the dual ported memory * space. The allocator for that is here. When the communication * process is reset, we reclaim the memory available. There is * currently no deallocator for this memory. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mpc8260.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/cpm2.h> #include <asm/rheap.h> #include <asm/fs_pd.h> #include <sysdev/fsl_soc.h> cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor space */ /* We allocate this here because it is used almost exclusively for * the communication processor devices. */ cpm2_map_t __iomem *cpm2_immr; EXPORT_SYMBOL(cpm2_immr); #define CPM_MAP_SIZE (0x40000) /* 256k - the PQ3 reserve this amount of space for CPM as it is larger than on PQ2 */ void __init cpm2_reset(void) { #ifdef CONFIG_PPC_85xx cpm2_immr = ioremap(get_immrbase() + 0x80000, CPM_MAP_SIZE); #else cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE); #endif /* Reclaim the DP memory for our use. 
*/ cpm_muram_init(); /* Tell everyone where the comm processor resides. */ cpmp = &cpm2_immr->im_cpm; #ifndef CONFIG_PPC_EARLY_DEBUG_CPM /* Reset the CPM. */ cpm_command(CPM_CR_RST, 0); #endif } static DEFINE_SPINLOCK(cmd_lock); #define MAX_CR_CMD_LOOPS 10000 int cpm_command(u32 command, u8 opcode) { int i, ret; unsigned long flags; spin_lock_irqsave(&cmd_lock, flags); ret = 0; out_be32(&cpmp->cp_cpcr, command | opcode | CPM_CR_FLG); for (i = 0; i < MAX_CR_CMD_LOOPS; i++) if ((in_be32(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0) goto out; printk(KERN_ERR "%s(): Not able to issue CPM command\n", __func__); ret = -EIO; out: spin_unlock_irqrestore(&cmd_lock, flags); return ret; } EXPORT_SYMBOL(cpm_command); /* Set a baud rate generator. This needs lots of work. There are * eight BRGs, which can be connected to the CPM channels or output * as clocks. The BRGs are in two different block of internal * memory mapped space. * The baud rate clock is the system clock divided by something. * It was set up long ago during the initial boot phase and is * is given to us. * Baud rate clocks are zero-based in the driver code (as that maps * to port numbers). Documentation uses 1-based numbering. */ void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src) { u32 __iomem *bp; u32 val; /* This is good enough to get SMCs running..... */ if (brg < 4) { bp = cpm2_map_size(im_brgc1, 16); } else { bp = cpm2_map_size(im_brgc5, 16); brg -= 4; } bp += brg; /* Round the clock divider to the nearest integer. 
*/ val = (((clk * 2 / rate) - 1) & ~1) | CPM_BRG_EN | src; if (div16) val |= CPM_BRG_DIV16; out_be32(bp, val); cpm2_unmap(bp); } EXPORT_SYMBOL(__cpm2_setbrg); int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode) { int ret = 0; int shift; int i, bits = 0; cpmux_t __iomem *im_cpmux; u32 __iomem *reg; u32 mask = 7; u8 clk_map[][3] = { {CPM_CLK_FCC1, CPM_BRG5, 0}, {CPM_CLK_FCC1, CPM_BRG6, 1}, {CPM_CLK_FCC1, CPM_BRG7, 2}, {CPM_CLK_FCC1, CPM_BRG8, 3}, {CPM_CLK_FCC1, CPM_CLK9, 4}, {CPM_CLK_FCC1, CPM_CLK10, 5}, {CPM_CLK_FCC1, CPM_CLK11, 6}, {CPM_CLK_FCC1, CPM_CLK12, 7}, {CPM_CLK_FCC2, CPM_BRG5, 0}, {CPM_CLK_FCC2, CPM_BRG6, 1}, {CPM_CLK_FCC2, CPM_BRG7, 2}, {CPM_CLK_FCC2, CPM_BRG8, 3}, {CPM_CLK_FCC2, CPM_CLK13, 4}, {CPM_CLK_FCC2, CPM_CLK14, 5}, {CPM_CLK_FCC2, CPM_CLK15, 6}, {CPM_CLK_FCC2, CPM_CLK16, 7}, {CPM_CLK_FCC3, CPM_BRG5, 0}, {CPM_CLK_FCC3, CPM_BRG6, 1}, {CPM_CLK_FCC3, CPM_BRG7, 2}, {CPM_CLK_FCC3, CPM_BRG8, 3}, {CPM_CLK_FCC3, CPM_CLK13, 4}, {CPM_CLK_FCC3, CPM_CLK14, 5}, {CPM_CLK_FCC3, CPM_CLK15, 6}, {CPM_CLK_FCC3, CPM_CLK16, 7}, {CPM_CLK_SCC1, CPM_BRG1, 0}, {CPM_CLK_SCC1, CPM_BRG2, 1}, {CPM_CLK_SCC1, CPM_BRG3, 2}, {CPM_CLK_SCC1, CPM_BRG4, 3}, {CPM_CLK_SCC1, CPM_CLK11, 4}, {CPM_CLK_SCC1, CPM_CLK12, 5}, {CPM_CLK_SCC1, CPM_CLK3, 6}, {CPM_CLK_SCC1, CPM_CLK4, 7}, {CPM_CLK_SCC2, CPM_BRG1, 0}, {CPM_CLK_SCC2, CPM_BRG2, 1}, {CPM_CLK_SCC2, CPM_BRG3, 2}, {CPM_CLK_SCC2, CPM_BRG4, 3}, {CPM_CLK_SCC2, CPM_CLK11, 4}, {CPM_CLK_SCC2, CPM_CLK12, 5}, {CPM_CLK_SCC2, CPM_CLK3, 6}, {CPM_CLK_SCC2, CPM_CLK4, 7}, {CPM_CLK_SCC3, CPM_BRG1, 0}, {CPM_CLK_SCC3, CPM_BRG2, 1}, {CPM_CLK_SCC3, CPM_BRG3, 2}, {CPM_CLK_SCC3, CPM_BRG4, 3}, {CPM_CLK_SCC3, CPM_CLK5, 4}, {CPM_CLK_SCC3, CPM_CLK6, 5}, {CPM_CLK_SCC3, CPM_CLK7, 6}, {CPM_CLK_SCC3, CPM_CLK8, 7}, {CPM_CLK_SCC4, CPM_BRG1, 0}, {CPM_CLK_SCC4, CPM_BRG2, 1}, {CPM_CLK_SCC4, CPM_BRG3, 2}, {CPM_CLK_SCC4, CPM_BRG4, 3}, {CPM_CLK_SCC4, CPM_CLK5, 4}, {CPM_CLK_SCC4, CPM_CLK6, 5}, {CPM_CLK_SCC4, CPM_CLK7, 6}, {CPM_CLK_SCC4, CPM_CLK8, 7}, }; im_cpmux 
= cpm2_map(im_cpmux); switch (target) { case CPM_CLK_SCC1: reg = &im_cpmux->cmx_scr; shift = 24; break; case CPM_CLK_SCC2: reg = &im_cpmux->cmx_scr; shift = 16; break; case CPM_CLK_SCC3: reg = &im_cpmux->cmx_scr; shift = 8; break; case CPM_CLK_SCC4: reg = &im_cpmux->cmx_scr; shift = 0; break; case CPM_CLK_FCC1: reg = &im_cpmux->cmx_fcr; shift = 24; break; case CPM_CLK_FCC2: reg = &im_cpmux->cmx_fcr; shift = 16; break; case CPM_CLK_FCC3: reg = &im_cpmux->cmx_fcr; shift = 8; break; default: printk(KERN_ERR "cpm2_clock_setup: invalid clock target\n"); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(clk_map); i++) { if (clk_map[i][0] == target && clk_map[i][1] == clock) { bits = clk_map[i][2]; break; } } if (i == ARRAY_SIZE(clk_map)) ret = -EINVAL; bits <<= shift; mask <<= shift; if (mode == CPM_CLK_RTX) { bits |= bits << 3; mask |= mask << 3; } else if (mode == CPM_CLK_RX) { bits <<= 3; mask <<= 3; } out_be32(reg, (in_be32(reg) & ~mask) | bits); cpm2_unmap(im_cpmux); return ret; } int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock) { int ret = 0; int shift; int i, bits = 0; cpmux_t __iomem *im_cpmux; u8 __iomem *reg; u8 mask = 3; u8 clk_map[][3] = { {CPM_CLK_SMC1, CPM_BRG1, 0}, {CPM_CLK_SMC1, CPM_BRG7, 1}, {CPM_CLK_SMC1, CPM_CLK7, 2}, {CPM_CLK_SMC1, CPM_CLK9, 3}, {CPM_CLK_SMC2, CPM_BRG2, 0}, {CPM_CLK_SMC2, CPM_BRG8, 1}, {CPM_CLK_SMC2, CPM_CLK4, 2}, {CPM_CLK_SMC2, CPM_CLK15, 3}, }; im_cpmux = cpm2_map(im_cpmux); switch (target) { case CPM_CLK_SMC1: reg = &im_cpmux->cmx_smr; mask = 3; shift = 4; break; case CPM_CLK_SMC2: reg = &im_cpmux->cmx_smr; mask = 3; shift = 0; break; default: printk(KERN_ERR "cpm2_smc_clock_setup: invalid clock target\n"); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(clk_map); i++) { if (clk_map[i][0] == target && clk_map[i][1] == clock) { bits = clk_map[i][2]; break; } } if (i == ARRAY_SIZE(clk_map)) ret = -EINVAL; bits <<= shift; mask <<= shift; out_8(reg, (in_8(reg) & ~mask) | bits); cpm2_unmap(im_cpmux); return ret; } struct 
cpm2_ioports { u32 dir, par, sor, odr, dat; u32 res[3]; }; void cpm2_set_pin(int port, int pin, int flags) { struct cpm2_ioports __iomem *iop = (struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport; pin = 1 << (31 - pin); if (flags & CPM_PIN_OUTPUT) setbits32(&iop[port].dir, pin); else clrbits32(&iop[port].dir, pin); if (!(flags & CPM_PIN_GPIO)) setbits32(&iop[port].par, pin); else clrbits32(&iop[port].par, pin); if (flags & CPM_PIN_SECONDARY) setbits32(&iop[port].sor, pin); else clrbits32(&iop[port].sor, pin); if (flags & CPM_PIN_OPENDRAIN) setbits32(&iop[port].odr, pin); else clrbits32(&iop[port].odr, pin); } static int cpm_init_par_io(void) { struct device_node *np; for_each_compatible_node(np, NULL, "fsl,cpm2-pario-bank") cpm2_gpiochip_add32(np); return 0; } arch_initcall(cpm_init_par_io);
gpl-2.0
MoKee/android_kernel_motorola_ghost
kernel/trace/trace.c
20
118115
/* * ring buffer based function tracer * * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Originally taken from the RT patch by: * Arnaldo Carvalho de Melo <acme@redhat.com> * * Based on code from the latency_tracer, that is: * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ #define REALLY_WANT_DEBUGFS #include <linux/ring_buffer.h> #include <generated/utsrelease.h> #include <linux/stacktrace.h> #include <linux/writeback.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/irqflags.h> #include <linux/debugfs.h> #include <linux/pagemap.h> #include <linux/hardirq.h> #include <linux/linkage.h> #include <linux/uaccess.h> #include <linux/kprobes.h> #include <linux/ftrace.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/splice.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/fs.h> #include <linux/coresight-stm.h> #include "trace.h" #include "trace_output.h" /* * On boot up, the ring buffer is set to the minimum size, so that * we do not waste memory on systems that are not using tracing. */ int ring_buffer_expanded; /* * We need to change this state when a selftest is running. * A selftest will lurk into the ring-buffer to count the * entries inserted during the selftest although some concurrent * insertions into the ring-buffer such as trace_printk could occurred * at the same time, giving false positive or negative results. */ static bool __read_mostly tracing_selftest_running; /* * If a tracer is running, we do not want to run SELFTEST. 
*/ bool __read_mostly tracing_selftest_disabled; /* For tracers that don't implement custom flags */ static struct tracer_opt dummy_tracer_opt[] = { { } }; static struct tracer_flags dummy_tracer_flags = { .val = 0, .opts = dummy_tracer_opt }; static int dummy_set_flag(u32 old_flags, u32 bit, int set) { return 0; } /* * Kill all tracing for good (never come back). * It is initialized to 1 but will turn to zero if the initialization * of the tracer is successful. But that is the only place that sets * this back to zero. */ static int tracing_disabled = 1; DEFINE_PER_CPU(int, ftrace_cpu_disabled); static inline void ftrace_disable_cpu(void) { preempt_disable(); __this_cpu_inc(ftrace_cpu_disabled); } static inline void ftrace_enable_cpu(void) { __this_cpu_dec(ftrace_cpu_disabled); preempt_enable(); } cpumask_var_t __read_mostly tracing_buffer_mask; /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops * * If there is an oops (or kernel panic) and the ftrace_dump_on_oops * is set, then ftrace_dump is called. This will output the contents * of the ftrace buffers to the console. This is very useful for * capturing traces that lead to crashes and outputing it to a * serial console. 
* * It is default off, but you can enable it with either specifying * "ftrace_dump_on_oops" in the kernel command line, or setting * /proc/sys/kernel/ftrace_dump_on_oops * Set 1 if you want to dump buffers of all CPUs * Set 2 if you want to dump the buffer of the CPU that triggered oops */ enum ftrace_dump_mode ftrace_dump_on_oops; static int tracing_set_tracer(const char *buf); #define MAX_TRACER_SIZE 100 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; static int __init set_cmdline_ftrace(char *str) { strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ ring_buffer_expanded = 1; return 1; } __setup("ftrace=", set_cmdline_ftrace); static int __init set_ftrace_dump_on_oops(char *str) { if (*str++ != '=' || !*str) { ftrace_dump_on_oops = DUMP_ALL; return 1; } if (!strcmp("orig_cpu", str)) { ftrace_dump_on_oops = DUMP_ORIG; return 1; } return 0; } __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); unsigned long long ns2usecs(cycle_t nsec) { nsec += 500; do_div(nsec, 1000); return nsec; } /* * The global_trace is the descriptor that holds the tracing * buffers for the live tracing. For each CPU, it contains * a link list of pages that will store trace entries. The * page descriptor of the pages in the memory is used to hold * the link list by linking the lru item in the page descriptor * to each of the pages in the buffer per CPU. * * For each active CPU there is a data field that holds the * pages for the buffer for that CPU. Each CPU has the same number * of pages allocated for its buffer. 
*/ static struct trace_array global_trace; static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); int filter_current_check_discard(struct ring_buffer *buffer, struct ftrace_event_call *call, void *rec, struct ring_buffer_event *event) { return filter_check_discard(call, rec, buffer, event); } EXPORT_SYMBOL_GPL(filter_current_check_discard); cycle_t ftrace_now(int cpu) { u64 ts; /* Early boot up does not have a buffer yet */ if (!global_trace.buffer) return trace_clock_local(); ts = ring_buffer_time_stamp(global_trace.buffer, cpu); ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts); return ts; } /* * The max_tr is used to snapshot the global_trace when a maximum * latency is reached. Some tracers will use this to store a maximum * trace while it continues examining live traces. * * The buffers for the max_tr are set up the same as the global_trace. * When a snapshot is taken, the link list of the max_tr is swapped * with the link list of the global_trace and the buffers are reset for * the global_trace so the tracing can continue. */ static struct trace_array max_tr; static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data); /* tracer_enabled is used to toggle activation of a tracer */ static int tracer_enabled = 1; /** * tracing_is_enabled - return tracer_enabled status * * This function is used by other tracers to know the status * of the tracer_enabled flag. Tracers may use this function * to know if it should enable their features when starting * up. See irqsoff tracer for an example (start_irqsoff_tracer). */ int tracing_is_enabled(void) { return tracer_enabled; } /* * trace_buf_size is the size in bytes that is allocated * for a buffer. Note, the number of bytes is always rounded * to page size. * * This number is purposely set to a low number of 16384. * If the dump on oops happens, it will be much appreciated * to not have to wait for all that output. Anyway this can be * boot time and run time configurable. 
*/ #define TRACE_BUF_SIZE_DEFAULT 262144UL /* 1024 * 256 */ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; /* current_trace points to the tracer that is currently active */ static struct tracer *current_trace __read_mostly; /* * trace_types_lock is used to protect the trace_types list. */ static DEFINE_MUTEX(trace_types_lock); /* * serialize the access of the ring buffer * * ring buffer serializes readers, but it is low level protection. * The validity of the events (which returns by ring_buffer_peek() ..etc) * are not protected by ring buffer. * * The content of events may become garbage if we allow other process consumes * these events concurrently: * A) the page of the consumed events may become a normal page * (not reader page) in ring buffer, and this page will be rewrited * by events producer. * B) The page of the consumed events may become a page for splice_read, * and this page will be returned to system. * * These primitives allow multi process access to different cpu ring buffer * concurrently. * * These primitives don't distinguish read-only and read-consume access. * Multi read-only access are also serialized. */ #ifdef CONFIG_SMP static DECLARE_RWSEM(all_cpu_access_lock); static DEFINE_PER_CPU(struct mutex, cpu_access_lock); static inline void trace_access_lock(int cpu) { if (cpu == TRACE_PIPE_ALL_CPU) { /* gain it for accessing the whole ring buffer. */ down_write(&all_cpu_access_lock); } else { /* gain it for accessing a cpu ring buffer. */ /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */ down_read(&all_cpu_access_lock); /* Secondly block other access to this @cpu ring buffer. 
*/ mutex_lock(&per_cpu(cpu_access_lock, cpu)); } } static inline void trace_access_unlock(int cpu) { if (cpu == TRACE_PIPE_ALL_CPU) { up_write(&all_cpu_access_lock); } else { mutex_unlock(&per_cpu(cpu_access_lock, cpu)); up_read(&all_cpu_access_lock); } } static inline void trace_access_lock_init(void) { int cpu; for_each_possible_cpu(cpu) mutex_init(&per_cpu(cpu_access_lock, cpu)); } #else static DEFINE_MUTEX(access_lock); static inline void trace_access_lock(int cpu) { (void)cpu; mutex_lock(&access_lock); } static inline void trace_access_unlock(int cpu) { (void)cpu; mutex_unlock(&access_lock); } static inline void trace_access_lock_init(void) { } #endif /* trace_wait is a waitqueue for tasks blocked on trace_poll */ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); /* trace_flags holds trace_options default values */ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | TRACE_ITER_IRQ_INFO; static int trace_stop_count; static DEFINE_RAW_SPINLOCK(tracing_start_lock); static void wakeup_work_handler(struct work_struct *work) { wake_up(&trace_wait); } static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler); /** * tracing_on - enable tracing buffers * * This function enables tracing buffers that may have been * disabled with tracing_off. */ void tracing_on(void) { if (global_trace.buffer) ring_buffer_record_on(global_trace.buffer); /* * This flag is only looked at when buffers haven't been * allocated yet. We don't really care about the race * between setting this flag and actually turning * on the buffer. */ global_trace.buffer_disabled = 0; } EXPORT_SYMBOL_GPL(tracing_on); /** * tracing_off - turn off tracing buffers * * This function stops the tracing buffers from recording data. * It does not disable any overhead the tracers themselves may * be causing. 
This function simply causes all recording to * the ring buffers to fail. */ void tracing_off(void) { if (global_trace.buffer) ring_buffer_record_off(global_trace.buffer); /* * This flag is only looked at when buffers haven't been * allocated yet. We don't really care about the race * between setting this flag and actually turning * on the buffer. */ global_trace.buffer_disabled = 1; } EXPORT_SYMBOL_GPL(tracing_off); /** * tracing_is_on - show state of ring buffers enabled */ int tracing_is_on(void) { if (global_trace.buffer) return ring_buffer_record_is_on(global_trace.buffer); return !global_trace.buffer_disabled; } EXPORT_SYMBOL_GPL(tracing_is_on); /** * trace_wake_up - wake up tasks waiting for trace input * * Schedules a delayed work to wake up any task that is blocked on the * trace_wait queue. These is used with trace_poll for tasks polling the * trace. */ void trace_wake_up(void) { const unsigned long delay = msecs_to_jiffies(2); if (trace_flags & TRACE_ITER_BLOCK) return; schedule_delayed_work(&wakeup_work, delay); } static int __init set_buf_size(char *str) { unsigned long buf_size; if (!str) return 0; buf_size = memparse(str, &str); /* nr_entries can not be zero */ if (buf_size == 0) return 0; trace_buf_size = buf_size; return 1; } __setup("trace_buf_size=", set_buf_size); static int __init set_tracing_thresh(char *str) { unsigned long threshhold; int ret; if (!str) return 0; ret = strict_strtoul(str, 0, &threshhold); if (ret < 0) return 0; tracing_thresh = threshhold * 1000; return 1; } __setup("tracing_thresh=", set_tracing_thresh); unsigned long nsecs_to_usecs(unsigned long nsecs) { return nsecs / 1000; } /* These must match the bit postions in trace_iterator_flags */ static const char *trace_options[] = { "print-parent", "sym-offset", "sym-addr", "verbose", "raw", "hex", "bin", "block", "stacktrace", "trace_printk", "ftrace_preempt", "branch", "annotate", "userstacktrace", "sym-userobj", "printk-msg-only", "context-info", "latency-format", 
"sleep-time", "graph-time", "record-cmd", "overwrite", "disable_on_free", "irq-info", NULL }; static struct { u64 (*func)(void); const char *name; } trace_clocks[] = { { trace_clock_local, "local" }, { trace_clock_global, "global" }, { trace_clock_counter, "counter" }, }; int trace_clock_id; /* * trace_parser_get_init - gets the buffer for trace parser */ int trace_parser_get_init(struct trace_parser *parser, int size) { memset(parser, 0, sizeof(*parser)); parser->buffer = kmalloc(size, GFP_KERNEL); if (!parser->buffer) return 1; parser->size = size; return 0; } /* * trace_parser_put - frees the buffer for trace parser */ void trace_parser_put(struct trace_parser *parser) { kfree(parser->buffer); } /* * trace_get_user - reads the user input string separated by space * (matched by isspace(ch)) * * For each string found the 'struct trace_parser' is updated, * and the function returns. * * Returns number of bytes read. * * See kernel/trace/trace.h for 'struct trace_parser' details. */ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, size_t cnt, loff_t *ppos) { char ch; size_t read = 0; ssize_t ret; if (!*ppos) trace_parser_clear(parser); ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; /* * The parser is not finished with the last write, * continue reading the user input without skipping spaces. */ if (!parser->cont) { /* skip white space */ while (cnt && isspace(ch)) { ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } /* only spaces were written */ if (isspace(ch)) { *ppos += read; ret = read; goto out; } parser->idx = 0; } /* read the non-space input */ while (cnt && !isspace(ch)) { if (parser->idx < parser->size - 1) parser->buffer[parser->idx++] = ch; else { ret = -EINVAL; goto out; } ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } /* We either got finished input or we have to wait for another call. 
*/ if (isspace(ch)) { parser->buffer[parser->idx] = 0; parser->cont = false; } else { parser->cont = true; parser->buffer[parser->idx++] = ch; } *ppos += read; ret = read; out: return ret; } ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) { int len; int ret; if (!cnt) return 0; if (s->len <= s->readpos) return -EBUSY; len = s->len - s->readpos; if (cnt > len) cnt = len; ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); if (ret == cnt) return -EFAULT; cnt -= ret; s->readpos += cnt; return cnt; } static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) { int len; void *ret; if (s->len <= s->readpos) return -EBUSY; len = s->len - s->readpos; if (cnt > len) cnt = len; ret = memcpy(buf, s->buffer + s->readpos, cnt); if (!ret) return -EFAULT; s->readpos += cnt; return cnt; } /* * ftrace_max_lock is used to protect the swapping of buffers * when taking a max snapshot. The buffers themselves are * protected by per_cpu spinlocks. But the action of the swap * needs its own lock. * * This is defined as a arch_spinlock_t in order to help * with performance when lockdep debugging is enabled. * * It is also used in other places outside the update_max_tr * so it needs to be defined outside of the * CONFIG_TRACER_MAX_TRACE. */ static arch_spinlock_t ftrace_max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; unsigned long __read_mostly tracing_thresh; #ifdef CONFIG_TRACER_MAX_TRACE unsigned long __read_mostly tracing_max_latency; /* * Copy the new maximum trace into the separate maximum-trace * structure. 
(this way the maximum trace is permanently saved, * for later retrieval via /sys/kernel/debug/tracing/latency_trace) */ static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct trace_array_cpu *data = tr->data[cpu]; struct trace_array_cpu *max_data; max_tr.cpu = cpu; max_tr.time_start = data->preempt_timestamp; max_data = max_tr.data[cpu]; max_data->saved_latency = tracing_max_latency; max_data->critical_start = data->critical_start; max_data->critical_end = data->critical_end; memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); max_data->pid = tsk->pid; max_data->uid = task_uid(tsk); max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; max_data->policy = tsk->policy; max_data->rt_priority = tsk->rt_priority; /* record this tasks comm */ tracing_record_cmdline(tsk); } /** * update_max_tr - snapshot all trace buffers from global_trace to max_tr * @tr: tracer * @tsk: the task with the latency * @cpu: The cpu that initiated the trace. * * Flip the buffers between the @tr and the max_tr and record information * about which task was the cause of this latency. */ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct ring_buffer *buf; if (trace_stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!current_trace->use_max_tr) { WARN_ON_ONCE(1); return; } arch_spin_lock(&ftrace_max_lock); buf = tr->buffer; tr->buffer = max_tr.buffer; max_tr.buffer = buf; __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&ftrace_max_lock); } /** * update_max_tr_single - only copy one trace over, and reset the rest * @tr - tracer * @tsk - task with the latency * @cpu - the cpu of the buffer to copy. * * Flip the trace of a single CPU buffer between the @tr and the max_tr. 
*/ void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) { int ret; if (trace_stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!current_trace->use_max_tr) { WARN_ON_ONCE(1); return; } arch_spin_lock(&ftrace_max_lock); ftrace_disable_cpu(); ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); if (ret == -EBUSY) { /* * We failed to swap the buffer due to a commit taking * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. */ trace_array_printk(&max_tr, _THIS_IP_, "Failed to swap buffers due to commit in progress\n"); } ftrace_enable_cpu(); WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&ftrace_max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ /** * register_tracer - register a tracer with the ftrace system. * @type - the plugin for the tracer * * Register a new plugin tracer. */ int register_tracer(struct tracer *type) __releases(kernel_lock) __acquires(kernel_lock) { struct tracer *t; int ret = 0; if (!type->name) { pr_info("Tracer must have a name\n"); return -1; } if (strlen(type->name) >= MAX_TRACER_SIZE) { pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); return -1; } mutex_lock(&trace_types_lock); tracing_selftest_running = true; for (t = trace_types; t; t = t->next) { if (strcmp(type->name, t->name) == 0) { /* already found */ pr_info("Tracer %s already registered\n", type->name); ret = -1; goto out; } } if (!type->set_flag) type->set_flag = &dummy_set_flag; if (!type->flags) type->flags = &dummy_tracer_flags; else if (!type->flags->opts) type->flags->opts = dummy_tracer_opt; if (!type->wait_pipe) type->wait_pipe = default_wait_pipe; #ifdef CONFIG_FTRACE_STARTUP_TEST if (type->selftest && !tracing_selftest_disabled) { struct tracer *saved_tracer = current_trace; struct trace_array *tr = &global_trace; /* * Run a selftest on this tracer. 
* Here we reset the trace buffer, and set the current * tracer to be this tracer. The tracer can then run some * internal tracing to verify that everything is in order. * If we fail, we do not register this tracer. */ tracing_reset_online_cpus(tr); current_trace = type; /* If we expanded the buffers, make sure the max is expanded too */ if (ring_buffer_expanded && type->use_max_tr) ring_buffer_resize(max_tr.buffer, trace_buf_size); /* the test is responsible for initializing and enabling */ pr_info("Testing tracer %s: ", type->name); ret = type->selftest(type, tr); /* the test is responsible for resetting too */ current_trace = saved_tracer; if (ret) { printk(KERN_CONT "FAILED!\n"); goto out; } /* Only reset on passing, to avoid touching corrupted buffers */ tracing_reset_online_cpus(tr); /* Shrink the max buffer again */ if (ring_buffer_expanded && type->use_max_tr) ring_buffer_resize(max_tr.buffer, 1); printk(KERN_CONT "PASSED\n"); } #endif type->next = trace_types; trace_types = type; out: tracing_selftest_running = false; mutex_unlock(&trace_types_lock); if (ret || !default_bootup_tracer) goto out_unlock; if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) goto out_unlock; printk(KERN_INFO "Starting tracer '%s'\n", type->name); /* Do we want this tracer to start on bootup? */ tracing_set_tracer(type->name); default_bootup_tracer = NULL; /* disable other selftests, since this will break it. 
*/ tracing_selftest_disabled = 1; #ifdef CONFIG_FTRACE_STARTUP_TEST printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", type->name); #endif out_unlock: return ret; } void unregister_tracer(struct tracer *type) { struct tracer **t; mutex_lock(&trace_types_lock); for (t = &trace_types; *t; t = &(*t)->next) { if (*t == type) goto found; } pr_info("Tracer %s not registered\n", type->name); goto out; found: *t = (*t)->next; if (type == current_trace && tracer_enabled) { tracer_enabled = 0; tracing_stop(); if (current_trace->stop) current_trace->stop(&global_trace); current_trace = &nop_trace; } out: mutex_unlock(&trace_types_lock); } static void __tracing_reset(struct ring_buffer *buffer, int cpu) { ftrace_disable_cpu(); ring_buffer_reset_cpu(buffer, cpu); ftrace_enable_cpu(); } void tracing_reset(struct trace_array *tr, int cpu) { struct ring_buffer *buffer = tr->buffer; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_sched(); __tracing_reset(buffer, cpu); ring_buffer_record_enable(buffer); } void tracing_reset_online_cpus(struct trace_array *tr) { struct ring_buffer *buffer = tr->buffer; int cpu; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_sched(); tr->time_start = ftrace_now(tr->cpu); for_each_online_cpu(cpu) __tracing_reset(buffer, cpu); ring_buffer_record_enable(buffer); } void tracing_reset_current(int cpu) { tracing_reset(&global_trace, cpu); } void tracing_reset_current_online_cpus(void) { tracing_reset_online_cpus(&global_trace); } #define SAVED_CMDLINES 128 #define NO_CMDLINE_MAP UINT_MAX static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; static int cmdline_idx; static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; /* temporary disable recording */ static atomic_t trace_record_cmdline_disabled __read_mostly; static void 
trace_init_cmdlines(void) { memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline)); memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid)); cmdline_idx = 0; } int is_tracing_stopped(void) { return trace_stop_count; } /** * ftrace_off_permanent - disable all ftrace code permanently * * This should only be called when a serious anomally has * been detected. This will turn off the function tracing, * ring buffers, and other tracing utilites. It takes no * locks and can be called from any context. */ void ftrace_off_permanent(void) { tracing_disabled = 1; ftrace_stop(); tracing_off_permanent(); } /** * tracing_start - quick start of the tracer * * If tracing is enabled but was stopped by tracing_stop, * this will start the tracer back up. */ void tracing_start(void) { struct ring_buffer *buffer; unsigned long flags; if (tracing_disabled) return; raw_spin_lock_irqsave(&tracing_start_lock, flags); if (--trace_stop_count) { if (trace_stop_count < 0) { /* Someone screwed up their debugging */ WARN_ON_ONCE(1); trace_stop_count = 0; } goto out; } /* Prevent the buffers from switching */ arch_spin_lock(&ftrace_max_lock); buffer = global_trace.buffer; if (buffer) ring_buffer_record_enable(buffer); buffer = max_tr.buffer; if (buffer) ring_buffer_record_enable(buffer); arch_spin_unlock(&ftrace_max_lock); ftrace_start(); out: raw_spin_unlock_irqrestore(&tracing_start_lock, flags); } /** * tracing_stop - quick stop of the tracer * * Light weight way to stop tracing. Use in conjunction with * tracing_start. 
*/ void tracing_stop(void) { struct ring_buffer *buffer; unsigned long flags; ftrace_stop(); raw_spin_lock_irqsave(&tracing_start_lock, flags); if (trace_stop_count++) goto out; /* Prevent the buffers from switching */ arch_spin_lock(&ftrace_max_lock); buffer = global_trace.buffer; if (buffer) ring_buffer_record_disable(buffer); buffer = max_tr.buffer; if (buffer) ring_buffer_record_disable(buffer); arch_spin_unlock(&ftrace_max_lock); out: raw_spin_unlock_irqrestore(&tracing_start_lock, flags); } void trace_stop_cmdline_recording(void); static void trace_save_cmdline(struct task_struct *tsk) { unsigned pid, idx; if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT)) return; /* * It's not the end of the world if we don't get * the lock, but we also don't want to spin * nor do we want to disable interrupts, * so if we miss here, then better luck next time. */ if (!arch_spin_trylock(&trace_cmdline_lock)) return; idx = map_pid_to_cmdline[tsk->pid]; if (idx == NO_CMDLINE_MAP) { idx = (cmdline_idx + 1) % SAVED_CMDLINES; /* * Check whether the cmdline buffer at idx has a pid * mapped. We are going to overwrite that entry so we * need to clear the map_pid_to_cmdline. Otherwise we * would read the new comm for the old pid. 
*/ pid = map_cmdline_to_pid[idx]; if (pid != NO_CMDLINE_MAP) map_pid_to_cmdline[pid] = NO_CMDLINE_MAP; map_cmdline_to_pid[idx] = tsk->pid; map_pid_to_cmdline[tsk->pid] = idx; cmdline_idx = idx; } memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); arch_spin_unlock(&trace_cmdline_lock); } void trace_find_cmdline(int pid, char comm[]) { unsigned map; if (!pid) { strcpy(comm, "<idle>"); return; } if (WARN_ON_ONCE(pid < 0)) { strcpy(comm, "<XXX>"); return; } if (pid > PID_MAX_DEFAULT) { strcpy(comm, "<...>"); return; } preempt_disable(); arch_spin_lock(&trace_cmdline_lock); map = map_pid_to_cmdline[pid]; if (map != NO_CMDLINE_MAP) strcpy(comm, saved_cmdlines[map]); else strcpy(comm, "<...>"); arch_spin_unlock(&trace_cmdline_lock); preempt_enable(); } void tracing_record_cmdline(struct task_struct *tsk) { if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled || !tracing_is_on()) return; trace_save_cmdline(tsk); } void tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, int pc) { struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; entry->padding = 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | #else TRACE_FLAG_IRQS_NOSUPPORT | #endif ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | (need_resched() ? 
TRACE_FLAG_NEED_RESCHED : 0); } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); struct ring_buffer_event * trace_buffer_lock_reserve(struct ring_buffer *buffer, int type, unsigned long len, unsigned long flags, int pc) { struct ring_buffer_event *event; event = ring_buffer_lock_reserve(buffer, len); if (event != NULL) { struct trace_entry *ent = ring_buffer_event_data(event); tracing_generic_entry_update(ent, flags, pc); ent->type = type; } return event; } static inline void __trace_buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, int wake) { ring_buffer_unlock_commit(buffer, event); ftrace_trace_stack(buffer, flags, 6, pc); ftrace_trace_userstack(buffer, flags, pc); if (wake) trace_wake_up(); } void trace_buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc) { __trace_buffer_unlock_commit(buffer, event, flags, pc, 1); } struct ring_buffer_event * trace_current_buffer_lock_reserve(struct ring_buffer **current_rb, int type, unsigned long len, unsigned long flags, int pc) { *current_rb = global_trace.buffer; return trace_buffer_lock_reserve(*current_rb, type, len, flags, pc); } EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve); void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc) { __trace_buffer_unlock_commit(buffer, event, flags, pc, 1); } EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit); void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc) { __trace_buffer_unlock_commit(buffer, event, flags, pc, 0); } EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit); void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer, struct ring_buffer_event *event, unsigned long flags, int pc, struct pt_regs *regs) { ring_buffer_unlock_commit(buffer, event); ftrace_trace_stack_regs(buffer, flags, 
0, pc, regs); ftrace_trace_userstack(buffer, flags, pc); } EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs); void trace_current_buffer_discard_commit(struct ring_buffer *buffer, struct ring_buffer_event *event) { ring_buffer_discard_commit(buffer, event); } EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit); void trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { struct ftrace_event_call *call = &event_function; struct ring_buffer *buffer = tr->buffer; struct ring_buffer_event *event; struct ftrace_entry *entry; /* If we are reading the ring buffer, don't trace */ if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) return; event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), flags, pc); if (!event) return; entry = ring_buffer_event_data(event); entry->ip = ip; entry->parent_ip = parent_ip; if (!filter_check_discard(call, entry, buffer, event)) ring_buffer_unlock_commit(buffer, event); } void ftrace(struct trace_array *tr, struct trace_array_cpu *data, unsigned long ip, unsigned long parent_ip, unsigned long flags, int pc) { if (likely(!atomic_read(&data->disabled))) trace_function(tr, ip, parent_ip, flags, pc); } #ifdef CONFIG_STACKTRACE #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long)) struct ftrace_stack { unsigned long calls[FTRACE_STACK_MAX_ENTRIES]; }; static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { struct ftrace_event_call *call = &event_kernel_stack; struct ring_buffer_event *event; struct stack_entry *entry; struct stack_trace trace; int use_stack; int size = FTRACE_STACK_ENTRIES; trace.nr_entries = 0; trace.skip = skip; /* * Since events can happen in NMIs there's no safe way to * use the per cpu ftrace_stacks. 
We reserve it and if an interrupt * or NMI comes in, it will just have to use the default * FTRACE_STACK_SIZE. */ preempt_disable_notrace(); use_stack = ++__get_cpu_var(ftrace_stack_reserve); /* * We don't need any atomic variables, just a barrier. * If an interrupt comes in, we don't care, because it would * have exited and put the counter back to what we want. * We just need a barrier to keep gcc from moving things * around. */ barrier(); if (use_stack == 1) { trace.entries = &__get_cpu_var(ftrace_stack).calls[0]; trace.max_entries = FTRACE_STACK_MAX_ENTRIES; if (regs) save_stack_trace_regs(regs, &trace); else save_stack_trace(&trace); if (trace.nr_entries > size) size = trace.nr_entries; } else /* From now on, use_stack is a boolean */ use_stack = 0; size *= sizeof(unsigned long); event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size, flags, pc); if (!event) goto out; entry = ring_buffer_event_data(event); memset(&entry->caller, 0, size); if (use_stack) memcpy(&entry->caller, trace.entries, trace.nr_entries * sizeof(unsigned long)); else { trace.max_entries = FTRACE_STACK_ENTRIES; trace.entries = entry->caller; if (regs) save_stack_trace_regs(regs, &trace); else save_stack_trace(&trace); } entry->size = trace.nr_entries; if (!filter_check_discard(call, entry, buffer, event)) ring_buffer_unlock_commit(buffer, event); out: /* Again, don't let gcc optimize things here */ barrier(); __get_cpu_var(ftrace_stack_reserve)--; preempt_enable_notrace(); } void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags, int skip, int pc, struct pt_regs *regs) { if (!(trace_flags & TRACE_ITER_STACKTRACE)) return; __ftrace_trace_stack(buffer, flags, skip, pc, regs); } void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, int skip, int pc) { if (!(trace_flags & TRACE_ITER_STACKTRACE)) return; __ftrace_trace_stack(buffer, flags, skip, pc, NULL); } void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, int 
pc) { __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL); } /** * trace_dump_stack - record a stack back trace in the trace buffer */ void trace_dump_stack(void) { unsigned long flags; if (tracing_disabled || tracing_selftest_running) return; local_save_flags(flags); /* skipping 3 traces, seems to get us at the caller of this function */ __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL); } static DEFINE_PER_CPU(int, user_stack_count); void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) { struct ftrace_event_call *call = &event_user_stack; struct ring_buffer_event *event; struct userstack_entry *entry; struct stack_trace trace; if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) return; /* * NMIs can not handle page faults, even with fix ups. * The save user stack can (and often does) fault. */ if (unlikely(in_nmi())) return; /* * prevent recursion, since the user stack tracing may * trigger other kernel events. */ preempt_disable(); if (__this_cpu_read(user_stack_count)) goto out; __this_cpu_inc(user_stack_count); event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, sizeof(*entry), flags, pc); if (!event) goto out_drop_count; entry = ring_buffer_event_data(event); entry->tgid = current->tgid; memset(&entry->caller, 0, sizeof(entry->caller)); trace.nr_entries = 0; trace.max_entries = FTRACE_STACK_ENTRIES; trace.skip = 0; trace.entries = entry->caller; save_stack_trace_user(&trace); if (!filter_check_discard(call, entry, buffer, event)) ring_buffer_unlock_commit(buffer, event); out_drop_count: __this_cpu_dec(user_stack_count); out: preempt_enable(); } #ifdef UNUSED static void __trace_userstack(struct trace_array *tr, unsigned long flags) { ftrace_trace_userstack(tr, flags, preempt_count()); } #endif /* UNUSED */ #endif /* CONFIG_STACKTRACE */ /** * trace_vbprintk - write binary msg to tracing buffer * */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { static arch_spinlock_t 
trace_buf_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static u32 trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_bprint; struct ring_buffer_event *event; struct ring_buffer *buffer; struct trace_array *tr = &global_trace; struct trace_array_cpu *data; struct bprint_entry *entry; unsigned long flags; int disable; int cpu, len = 0, size, pc; if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); pc = preempt_count(); preempt_disable_notrace(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; disable = atomic_inc_return(&data->disabled); if (unlikely(disable != 1)) goto out; /* Lockdep uses trace_printk for lock tracing */ local_irq_save(flags); arch_spin_lock(&trace_buf_lock); len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); if (len > TRACE_BUF_SIZE || len < 0) goto out_unlock; size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->buffer; event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, flags, pc); if (!event) goto out_unlock; entry = ring_buffer_event_data(event); entry->ip = ip; entry->fmt = fmt; memcpy(entry->buf, trace_buf, sizeof(u32) * len); if (!filter_check_discard(call, entry, buffer, event)) { ring_buffer_unlock_commit(buffer, event); ftrace_trace_stack(buffer, flags, 6, pc); } out_unlock: arch_spin_unlock(&trace_buf_lock); local_irq_restore(flags); out: atomic_dec_return(&data->disabled); preempt_enable_notrace(); unpause_graph_tracing(); return len; } EXPORT_SYMBOL_GPL(trace_vbprintk); int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) 
{ int ret; va_list ap; if (!(trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); return ret; } int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; static char trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_print; struct ring_buffer_event *event; struct ring_buffer *buffer; struct trace_array_cpu *data; int cpu, len = 0, size, pc; struct print_entry *entry; unsigned long irq_flags; int disable; if (tracing_disabled || tracing_selftest_running) return 0; pc = preempt_count(); preempt_disable_notrace(); cpu = raw_smp_processor_id(); data = tr->data[cpu]; disable = atomic_inc_return(&data->disabled); if (unlikely(disable != 1)) goto out; pause_graph_tracing(); raw_local_irq_save(irq_flags); arch_spin_lock(&trace_buf_lock); len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); size = sizeof(*entry) + len + 1; buffer = tr->buffer; event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, irq_flags, pc); if (!event) goto out_unlock; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, trace_buf, len); entry->buf[len] = '\0'; if (!filter_check_discard(call, entry, buffer, event)) { stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1); ring_buffer_unlock_commit(buffer, event); ftrace_trace_stack(buffer, irq_flags, 6, pc); } out_unlock: arch_spin_unlock(&trace_buf_lock); raw_local_irq_restore(irq_flags); unpause_graph_tracing(); out: atomic_dec_return(&data->disabled); preempt_enable_notrace(); return len; } int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { /* Don't allow ftrace to trace into the ring buffers */ ftrace_disable_cpu(); iter->idx++; if 
(iter->buffer_iter[iter->cpu]) ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); ftrace_enable_cpu(); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; /* Don't allow ftrace to trace into the ring buffers */ ftrace_disable_cpu(); if (buf_iter) event = ring_buffer_iter_peek(buf_iter, ts); else event = ring_buffer_peek(iter->tr->buffer, cpu, ts, lost_events); ftrace_enable_cpu(); if (event) { iter->ent_size = ring_buffer_event_length(event); return ring_buffer_event_data(event); } iter->ent_size = 0; return NULL; } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { struct ring_buffer *buffer = iter->tr->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; u64 next_ts = 0, ts; int next_cpu = -1; int next_size = 0; int cpu; /* * If we are in a per_cpu trace file, don't bother by iterating over * all cpu and peek directly. 
*/ if (cpu_file > TRACE_PIPE_ALL_CPU) { if (ring_buffer_empty_cpu(buffer, cpu_file)) return NULL; ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); if (ent_cpu) *ent_cpu = cpu_file; return ent; } for_each_tracing_cpu(cpu) { if (ring_buffer_empty_cpu(buffer, cpu)) continue; ent = peek_next_entry(iter, cpu, &ts, &lost_events); /* * Pick the entry with the smallest timestamp: */ if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; next_ts = ts; next_lost = lost_events; next_size = iter->ent_size; } } iter->ent_size = next_size; if (ent_cpu) *ent_cpu = next_cpu; if (ent_ts) *ent_ts = next_ts; if (missing_events) *missing_events = next_lost; return next; } /* Find the next real entry, without updating the iterator itself */ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { return __find_next_entry(iter, ent_cpu, NULL, ent_ts); } /* Find the next real entry, and increment the iterator to the next entry */ void *trace_find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->lost_events, &iter->ts); if (iter->ent) trace_iterator_increment(iter); return iter->ent ? 
iter : NULL; } static void trace_consume(struct trace_iterator *iter) { /* Don't allow ftrace to trace into the ring buffers */ ftrace_disable_cpu(); ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts, &iter->lost_events); ftrace_enable_cpu(); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_iterator *iter = m->private; int i = (int)*pos; void *ent; WARN_ON_ONCE(iter->leftover); (*pos)++; /* can't go backwards */ if (iter->idx > i) return NULL; if (iter->idx < 0) ent = trace_find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) ent = trace_find_next_entry_inc(iter); iter->pos = *pos; return ent; } void tracing_iter_reset(struct trace_iterator *iter, int cpu) { struct trace_array *tr = iter->tr; struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter; unsigned long entries = 0; u64 ts; tr->data[cpu]->skipped_entries = 0; if (!iter->buffer_iter[cpu]) return; buf_iter = iter->buffer_iter[cpu]; ring_buffer_iter_reset(buf_iter); /* * We could have the case with the max latency tracers * that a reset never took place on a cpu. This is evident * by the timestamp being before the start of the buffer. */ while ((event = ring_buffer_iter_peek(buf_iter, &ts))) { if (ts >= iter->tr->time_start) break; entries++; ring_buffer_read(buf_iter, NULL); } tr->data[cpu]->skipped_entries = entries; } /* * The current tracer is copied to avoid a global locking * all around. 
*/ static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; static struct tracer *old_tracer; int cpu_file = iter->cpu_file; void *p = NULL; loff_t l = 0; int cpu; /* copy the tracer to avoid using a global lock all around */ mutex_lock(&trace_types_lock); if (unlikely(old_tracer != current_trace && current_trace)) { old_tracer = current_trace; *iter->trace = *current_trace; } mutex_unlock(&trace_types_lock); atomic_inc(&trace_record_cmdline_disabled); if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; iter->idx = -1; ftrace_disable_cpu(); if (cpu_file == TRACE_PIPE_ALL_CPU) { for_each_tracing_cpu(cpu) tracing_iter_reset(iter, cpu); } else tracing_iter_reset(iter, cpu_file); ftrace_enable_cpu(); iter->leftover = 0; for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; } else { /* * If we overflowed the seq_file before, then we want * to just reuse the trace_seq buffer again. */ if (iter->leftover) p = iter; else { l = *pos - 1; p = s_next(m, p, &l); } } trace_event_read_lock(); trace_access_lock(cpu_file); return p; } static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; atomic_dec(&trace_record_cmdline_disabled); trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } static void get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries) { unsigned long count; int cpu; *total = 0; *entries = 0; for_each_tracing_cpu(cpu) { count = ring_buffer_entries_cpu(tr->buffer, cpu); /* * If this buffer has skipped entries, then we hold all * entries for the trace and we need to ignore the * ones before the time stamp. 
*/ if (tr->data[cpu]->skipped_entries) { count -= tr->data[cpu]->skipped_entries; /* total is the same as the entries */ *total += count; } else *total += count + ring_buffer_overrun_cpu(tr->buffer, cpu); *entries += count; } } static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n"); seq_puts(m, "# / _-----=> irqs-off \n"); seq_puts(m, "# | / _----=> need-resched \n"); seq_puts(m, "# || / _---=> hardirq/softirq \n"); seq_puts(m, "# ||| / _--=> preempt-depth \n"); seq_puts(m, "# |||| / delay \n"); seq_puts(m, "# cmd pid ||||| time | caller \n"); seq_puts(m, "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct trace_array *tr, struct seq_file *m) { unsigned long total; unsigned long entries; get_total_entries(tr, &total, &entries); seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", entries, total, num_online_cpus()); seq_puts(m, "#\n"); } static void print_func_help_header(struct trace_array *tr, struct seq_file *m) { print_event_info(tr, m); seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); seq_puts(m, "# | | | | |\n"); } static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m) { print_event_info(tr, m); seq_puts(m, "# _-----=> irqs-off\n"); seq_puts(m, "# / _----=> need-resched\n"); seq_puts(m, "# | / _---=> hardirq/softirq\n"); seq_puts(m, "# || / _--=> preempt-depth\n"); seq_puts(m, "# ||| / delay\n"); seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); seq_puts(m, "# | | | |||| | |\n"); } void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); struct trace_array *tr = iter->tr; struct trace_array_cpu *data = tr->data[tr->cpu]; struct tracer *type = current_trace; unsigned long entries; unsigned long total; const char *name = "preemption"; if (type) name = type->name; get_total_entries(tr, &total, &entries); seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, UTS_RELEASE); 
seq_puts(m, "# -----------------------------------" "---------------------------------\n"); seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d", nsecs_to_usecs(data->saved_latency), entries, total, tr->cpu, #if defined(CONFIG_PREEMPT_NONE) "server", #elif defined(CONFIG_PREEMPT_VOLUNTARY) "desktop", #elif defined(CONFIG_PREEMPT) "preempt", #else "unknown", #endif /* These are reserved for later use */ 0, 0, 0, 0); #ifdef CONFIG_SMP seq_printf(m, " #P:%d)\n", num_online_cpus()); #else seq_puts(m, ")\n"); #endif seq_puts(m, "# -----------------\n"); seq_printf(m, "# | task: %.16s-%d " "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", data->comm, data->pid, data->uid, data->nice, data->policy, data->rt_priority); seq_puts(m, "# -----------------\n"); if (data->critical_start) { seq_puts(m, "# => started at: "); seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n# => ended at: "); seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n#\n"); } seq_puts(m, "#\n"); } static void test_cpu_buff_start(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; if (!(trace_flags & TRACE_ITER_ANNOTATE)) return; if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) return; if (cpumask_test_cpu(iter->cpu, iter->started)) return; if (iter->tr->data[iter->cpu]->skipped_entries) return; cpumask_set_cpu(iter->cpu, iter->started); /* Don't print started cpu buffer for the first entry of the trace */ if (iter->idx > 1) trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); } static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; struct trace_event *event; entry = iter->ent; test_cpu_buff_start(iter); event = ftrace_find_event(entry->type); if (trace_flags & TRACE_ITER_CONTEXT_INFO) { if 
(iter->iter_flags & TRACE_FILE_LAT_FMT) { if (!trace_print_lat_context(iter)) goto partial; } else { if (!trace_print_context(iter)) goto partial; } } if (event) return event->funcs->trace(iter, sym_flags, event); if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (trace_flags & TRACE_ITER_CONTEXT_INFO) { if (!trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts)) goto partial; } event = ftrace_find_event(entry->type); if (event) return event->funcs->raw(iter, 0, event); if (!trace_seq_printf(s, "%d ?\n", entry->type)) goto partial; return TRACE_TYPE_HANDLED; partial: return TRACE_TYPE_PARTIAL_LINE; } static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_HEX_FIELD_RET(s, entry->pid); SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); SEQ_PUT_HEX_FIELD_RET(s, iter->ts); } event = ftrace_find_event(entry->type); if (event) { enum print_line_t ret = event->funcs->hex(iter, 0, event); if (ret != TRACE_TYPE_HANDLED) return ret; } SEQ_PUT_FIELD_RET(s, newline); return TRACE_TYPE_HANDLED; } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_FIELD_RET(s, entry->pid); SEQ_PUT_FIELD_RET(s, iter->cpu); SEQ_PUT_FIELD_RET(s, iter->ts); } event = ftrace_find_event(entry->type); return event ? 
event->funcs->binary(iter, 0, event) : TRACE_TYPE_HANDLED; } int trace_empty(struct trace_iterator *iter) { int cpu; /* If we are looking at one CPU buffer, only check that one */ if (iter->cpu_file != TRACE_PIPE_ALL_CPU) { cpu = iter->cpu_file; if (iter->buffer_iter[cpu]) { if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) return 0; } else { if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) return 0; } return 1; } for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) { if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) return 0; } else { if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu)) return 0; } } return 1; } /* Called with trace_event_read_lock() held. */ enum print_line_t print_trace_line(struct trace_iterator *iter) { enum print_line_t ret; if (iter->lost_events && !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", iter->cpu, iter->lost_events)) return TRACE_TYPE_PARTIAL_LINE; if (iter->trace && iter->trace->print_line) { ret = iter->trace->print_line(iter); if (ret != TRACE_TYPE_UNHANDLED) return ret; } if (iter->ent->type == TRACE_BPRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_bprintk_msg_only(iter); if (iter->ent->type == TRACE_PRINT && trace_flags & TRACE_ITER_PRINTK && trace_flags & TRACE_ITER_PRINTK_MSGONLY) return trace_print_printk_msg_only(iter); if (trace_flags & TRACE_ITER_BIN) return print_bin_fmt(iter); if (trace_flags & TRACE_ITER_HEX) return print_hex_fmt(iter); if (trace_flags & TRACE_ITER_RAW) return print_raw_fmt(iter); return print_trace_fmt(iter); } void trace_latency_header(struct seq_file *m) { struct trace_iterator *iter = m->private; /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) print_trace_header(m, iter); if (!(trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } void trace_default_header(struct seq_file *m) { struct trace_iterator *iter = m->private; if (!(trace_flags & 
TRACE_ITER_CONTEXT_INFO)) return; if (iter->iter_flags & TRACE_FILE_LAT_FMT) { /* print nothing if the buffers are empty */ if (trace_empty(iter)) return; print_trace_header(m, iter); if (!(trace_flags & TRACE_ITER_VERBOSE)) print_lat_help_header(m); } else { if (!(trace_flags & TRACE_ITER_VERBOSE)) { if (trace_flags & TRACE_ITER_IRQ_INFO) print_func_help_header_irq(iter->tr, m); else print_func_help_header(iter->tr, m); } } } static void test_ftrace_alive(struct seq_file *m) { if (!ftrace_is_dead()) return; seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); } static int s_show(struct seq_file *m, void *v) { struct trace_iterator *iter = v; int ret; if (iter->ent == NULL) { if (iter->tr) { seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_puts(m, "#\n"); test_ftrace_alive(m); } if (iter->trace && iter->trace->print_header) iter->trace->print_header(m); else trace_default_header(m); } else if (iter->leftover) { /* * If we filled the seq_file buffer earlier, we * want to just show it now. */ ret = trace_print_seq(m, &iter->seq); /* ret should this time be zero, but you never know */ iter->leftover = ret; } else { print_trace_line(iter); ret = trace_print_seq(m, &iter->seq); /* * If we overflow the seq_file buffer, then it will * ask us for this data again at start up. * Use that instead. * ret is 0 if seq_file write succeeded. * -1 otherwise. 
*/ iter->leftover = ret; } return 0; } static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file) { long cpu_file = (long) inode->i_private; void *fail_ret = ERR_PTR(-ENOMEM); struct trace_iterator *iter; struct seq_file *m; int cpu, ret; if (tracing_disabled) return ERR_PTR(-ENODEV); iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) return ERR_PTR(-ENOMEM); /* * We make a copy of the current tracer to avoid concurrent * changes on it while we are reading. */ mutex_lock(&trace_types_lock); iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); if (!iter->trace) goto fail; if (current_trace) *iter->trace = *current_trace; if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; if (current_trace && current_trace->print_max) iter->tr = &max_tr; else iter->tr = &global_trace; iter->pos = -1; mutex_init(&iter->mutex); iter->cpu_file = cpu_file; /* Notify the tracer early; before we stop tracing. 
*/ if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->tr->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* stop the trace while dumping */ tracing_stop(); if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { for_each_tracing_cpu(cpu) { iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->tr->buffer, cpu); } ring_buffer_read_prepare_sync(); for_each_tracing_cpu(cpu) { ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->tr->buffer, cpu); ring_buffer_read_prepare_sync(); ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } ret = seq_open(file, &tracer_seq_ops); if (ret < 0) { fail_ret = ERR_PTR(ret); goto fail_buffer; } m = file->private_data; m->private = iter; mutex_unlock(&trace_types_lock); return iter; fail_buffer: for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } free_cpumask_var(iter->started); tracing_start(); fail: mutex_unlock(&trace_types_lock); kfree(iter->trace); kfree(iter); return fail_ret; } int tracing_open_generic(struct inode *inode, struct file *filp) { if (tracing_disabled) return -ENODEV; filp->private_data = inode->i_private; return 0; } static int tracing_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; struct trace_iterator *iter; int cpu; if (!(file->f_mode & FMODE_READ)) return 0; iter = m->private; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } if (iter->trace && iter->trace->close) iter->trace->close(iter); /* reenable tracing if it was previously enabled */ tracing_start(); mutex_unlock(&trace_types_lock); seq_release(inode, file); mutex_destroy(&iter->mutex); free_cpumask_var(iter->started); kfree(iter->trace); 
	kfree(iter);
	return 0;
}

/*
 * Open the "trace" file.  Opening with O_TRUNC erases the buffer
 * contents (one CPU buffer or all of them, selected by i_private);
 * opening for read attaches a full trace_iterator via __tracing_open().
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret = 0;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC)) {
		long cpu = (long) inode->i_private;

		if (cpu == TRACE_PIPE_ALL_CPU)
			tracing_reset_online_cpus(&global_trace);
		else
			tracing_reset(&global_trace, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}
	return ret;
}

/* seq_file iterator over the registered tracer list: advance one entry */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = t->next;

	return t;
}

/*
 * Start a sequence over the tracer list.  Takes trace_types_lock to
 * keep the list stable; it is released in t_stop().
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	/* walk forward to the entry for the requested position */
	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

/* print one tracer name: space-separated, newline after the last one */
static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations tracer_seq_ops2_unused; /* NOTE(review): no such symbol; see show_traces_seq_ops below */
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

/* open "available_tracers": a plain seq_file over the tracer list */
static int show_traces_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(file, &show_traces_seq_ops);
}

/* writes to "trace" are accepted and discarded (only O_TRUNC matters) */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

/* seeking is only meaningful when a seq_file reader is attached */
static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
{
	if (file->f_mode & FMODE_READ)
		return seq_lseek(file, offset, origin);
	else
		return 0;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_seek,
	.release	= tracing_release,
};

static const struct file_operations
show_traces_fops = { .open = show_traces_open, .read = seq_read, .release = seq_release, .llseek = seq_lseek, }; /* * Only trace on a CPU if the bitmask is set: */ static cpumask_var_t tracing_cpumask; /* * The tracer itself will not take this lock, but still we want * to provide a consistent cpumask to user-space: */ static DEFINE_MUTEX(tracing_cpumask_update_lock); /* * Temporary storage for the character representation of the * CPU bitmask (and one more byte for the newline): */ static char mask_str[NR_CPUS + 1]; static ssize_t tracing_cpumask_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { int len; mutex_lock(&tracing_cpumask_update_lock); len = cpumask_scnprintf(mask_str, count, tracing_cpumask); if (count - len < 2) { count = -EINVAL; goto out_err; } len += sprintf(mask_str + len, "\n"); count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); out_err: mutex_unlock(&tracing_cpumask_update_lock); return count; } static ssize_t tracing_cpumask_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { int err, cpu; cpumask_var_t tracing_cpumask_new; if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) return -ENOMEM; err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); if (err) goto err_unlock; mutex_lock(&tracing_cpumask_update_lock); local_irq_disable(); arch_spin_lock(&ftrace_max_lock); for_each_tracing_cpu(cpu) { /* * Increase/decrease the disabled counter if we are * about to flip a bit in the cpumask: */ if (cpumask_test_cpu(cpu, tracing_cpumask) && !cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_inc(&global_trace.data[cpu]->disabled); ring_buffer_record_disable_cpu(global_trace.buffer, cpu); } if (!cpumask_test_cpu(cpu, tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&global_trace.data[cpu]->disabled); ring_buffer_record_enable_cpu(global_trace.buffer, cpu); } } arch_spin_unlock(&ftrace_max_lock); local_irq_enable(); cpumask_copy(tracing_cpumask, 
tracing_cpumask_new); mutex_unlock(&tracing_cpumask_update_lock); free_cpumask_var(tracing_cpumask_new); return count; err_unlock: free_cpumask_var(tracing_cpumask_new); return err; } static const struct file_operations tracing_cpumask_fops = { .open = tracing_open_generic, .read = tracing_cpumask_read, .write = tracing_cpumask_write, .llseek = generic_file_llseek, }; static int tracing_trace_options_show(struct seq_file *m, void *v) { struct tracer_opt *trace_opts; u32 tracer_flags; int i; mutex_lock(&trace_types_lock); tracer_flags = current_trace->flags->val; trace_opts = current_trace->flags->opts; for (i = 0; trace_options[i]; i++) { if (trace_flags & (1 << i)) seq_printf(m, "%s\n", trace_options[i]); else seq_printf(m, "no%s\n", trace_options[i]); } for (i = 0; trace_opts[i].name; i++) { if (tracer_flags & trace_opts[i].bit) seq_printf(m, "%s\n", trace_opts[i].name); else seq_printf(m, "no%s\n", trace_opts[i].name); } mutex_unlock(&trace_types_lock); return 0; } static int __set_tracer_option(struct tracer *trace, struct tracer_flags *tracer_flags, struct tracer_opt *opts, int neg) { int ret; ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); if (ret) return ret; if (neg) tracer_flags->val &= ~opts->bit; else tracer_flags->val |= opts->bit; return 0; } /* Try to assign a tracer specific option */ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) { struct tracer_flags *tracer_flags = trace->flags; struct tracer_opt *opts = NULL; int i; for (i = 0; tracer_flags->opts[i].name; i++) { opts = &tracer_flags->opts[i]; if (strcmp(cmp, opts->name) == 0) return __set_tracer_option(trace, trace->flags, opts, neg); } return -EINVAL; } /* Some tracers require overwrite to stay enabled */ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) { if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) return -1; return 0; } int set_tracer_flag(unsigned int mask, int enabled) { /* do nothing if flag is already set */ if 
(!!(trace_flags & mask) == !!enabled) return 0; /* Give the tracer a chance to approve the change */ if (current_trace->flag_changed) if (current_trace->flag_changed(current_trace, mask, !!enabled)) return -EINVAL; if (enabled) trace_flags |= mask; else trace_flags &= ~mask; if (mask == TRACE_ITER_RECORD_CMD) trace_event_enable_cmd_record(enabled); if (mask == TRACE_ITER_OVERWRITE) ring_buffer_change_overwrite(global_trace.buffer, enabled); return 0; } static ssize_t tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; char *cmp; int neg = 0; int ret = -ENODEV; int i; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; cmp = strstrip(buf); if (strncmp(cmp, "no", 2) == 0) { neg = 1; cmp += 2; } mutex_lock(&trace_types_lock); for (i = 0; trace_options[i]; i++) { if (strcmp(cmp, trace_options[i]) == 0) { ret = set_tracer_flag(1 << i, !neg); break; } } /* If no option could be set, test the specific tracer options */ if (!trace_options[i]) ret = set_tracer_option(current_trace, cmp, neg); mutex_unlock(&trace_types_lock); if (ret < 0) return ret; *ppos += cnt; return cnt; } static int tracing_trace_options_open(struct inode *inode, struct file *file) { if (tracing_disabled) return -ENODEV; return single_open(file, tracing_trace_options_show, NULL); } static const struct file_operations tracing_iter_fops = { .open = tracing_trace_options_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = tracing_trace_options_write, }; static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "# mount -t debugfs nodev /sys/kernel/debug\n\n" "# cat /sys/kernel/debug/tracing/available_tracers\n" "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n" "# cat /sys/kernel/debug/tracing/current_tracer\n" "nop\n" "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n" "# cat /sys/kernel/debug/tracing/current_tracer\n" "wakeup\n" "# 
cat /sys/kernel/debug/tracing/trace_options\n" "noprint-parent nosym-offset nosym-addr noverbose\n" "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n" "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n" "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n" "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n" ; static ssize_t tracing_readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static const struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, .llseek = generic_file_llseek, }; static ssize_t tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) { char *buf_comm; char *file_buf; char *buf; int len = 0; int pid; int i; file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL); if (!file_buf) return -ENOMEM; buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL); if (!buf_comm) { kfree(file_buf); return -ENOMEM; } buf = file_buf; for (i = 0; i < SAVED_CMDLINES; i++) { int r; pid = map_cmdline_to_pid[i]; if (pid == -1 || pid == NO_CMDLINE_MAP) continue; trace_find_cmdline(pid, buf_comm); r = sprintf(buf, "%d %s\n", pid, buf_comm); buf += r; len += r; } len = simple_read_from_buffer(ubuf, cnt, ppos, file_buf, len); kfree(file_buf); kfree(buf_comm); return len; } static const struct file_operations tracing_saved_cmdlines_fops = { .open = tracing_open_generic, .read = tracing_saved_cmdlines_read, .llseek = generic_file_llseek, }; static ssize_t tracing_ctrl_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int r; r = sprintf(buf, "%u\n", tracer_enabled); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_ctrl_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, 
cnt, 10, &val); if (ret) return ret; val = !!val; mutex_lock(&trace_types_lock); if (tracer_enabled ^ val) { /* Only need to warn if this is used to change the state */ WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on"); if (val) { tracer_enabled = 1; if (current_trace->start) current_trace->start(tr); tracing_start(); } else { tracer_enabled = 0; tracing_stop(); if (current_trace->stop) current_trace->stop(tr); } } mutex_unlock(&trace_types_lock); *ppos += cnt; return cnt; } static ssize_t tracing_set_trace_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[MAX_TRACER_SIZE+2]; int r; mutex_lock(&trace_types_lock); if (current_trace) r = sprintf(buf, "%s\n", current_trace->name); else r = sprintf(buf, "\n"); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } int tracer_init(struct tracer *t, struct trace_array *tr) { tracing_reset_online_cpus(tr); return t->init(tr); } static int __tracing_resize_ring_buffer(unsigned long size) { int ret; /* * If kernel or user changes the size of the ring buffer * we use the size that was given, and we can forget about * expanding it later. */ ring_buffer_expanded = 1; ret = ring_buffer_resize(global_trace.buffer, size); if (ret < 0) return ret; if (!current_trace->use_max_tr) goto out; ret = ring_buffer_resize(max_tr.buffer, size); if (ret < 0) { int r; r = ring_buffer_resize(global_trace.buffer, global_trace.entries); if (r < 0) { /* * AARGH! We are left with different * size max buffer!!!! * The max buffer is our "snapshot" buffer. * When a tracer needs a snapshot (one of the * latency tracers), it swaps the max buffer * with the saved snap shot. We succeeded to * update the size of the main buffer, but failed to * update the size of the max buffer. But when we tried * to reset the main buffer to the original size, we * failed there too. This is very unlikely to * happen, but if it does, warn and kill all * tracing. 
*/ WARN_ON(1); tracing_disabled = 1; } return ret; } max_tr.entries = size; out: global_trace.entries = size; trace_buf_size = size; return ret; } unsigned long tracing_get_trace_buf_size(void) { return trace_buf_size; } static ssize_t tracing_resize_ring_buffer(unsigned long size) { int cpu, ret = size; mutex_lock(&trace_types_lock); tracing_stop(); /* disable all cpu buffers */ for_each_tracing_cpu(cpu) { if (global_trace.data[cpu]) atomic_inc(&global_trace.data[cpu]->disabled); if (max_tr.data[cpu]) atomic_inc(&max_tr.data[cpu]->disabled); } if (size != global_trace.entries) ret = __tracing_resize_ring_buffer(size); if (ret < 0) ret = -ENOMEM; for_each_tracing_cpu(cpu) { if (global_trace.data[cpu]) atomic_dec(&global_trace.data[cpu]->disabled); if (max_tr.data[cpu]) atomic_dec(&max_tr.data[cpu]->disabled); } tracing_start(); mutex_unlock(&trace_types_lock); return ret; } /** * tracing_update_buffers - used by tracing facility to expand ring buffers * * To save on memory when the tracing is never used on a system with it * configured in. The ring buffers are set to a minimum size. But once * a user starts to use the tracing facility, then they need to grow * to their default size. * * This function is to be called when a tracer is about to be used. 
*/ int tracing_update_buffers(void) { int ret = 0; mutex_lock(&trace_types_lock); if (!ring_buffer_expanded) ret = __tracing_resize_ring_buffer(trace_buf_size); mutex_unlock(&trace_types_lock); return ret; } struct trace_option_dentry; static struct trace_option_dentry * create_trace_option_files(struct tracer *tracer); static void destroy_trace_option_files(struct trace_option_dentry *topts); static int tracing_set_tracer(const char *buf) { static struct trace_option_dentry *topts; struct trace_array *tr = &global_trace; struct tracer *t; int ret = 0; mutex_lock(&trace_types_lock); if (!ring_buffer_expanded) { ret = __tracing_resize_ring_buffer(trace_buf_size); if (ret < 0) goto out; ret = 0; } for (t = trace_types; t; t = t->next) { if (strcmp(t->name, buf) == 0) break; } if (!t) { ret = -EINVAL; goto out; } if (t == current_trace) goto out; trace_branch_disable(); current_trace->enabled = false; if (current_trace && current_trace->reset) current_trace->reset(tr); if (current_trace && current_trace->use_max_tr) { /* * We don't free the ring buffer. instead, resize it because * The max_tr ring buffer has some state (e.g. ring->clock) and * we want preserve it. 
*/ ring_buffer_resize(max_tr.buffer, 1); max_tr.entries = 1; } destroy_trace_option_files(topts); current_trace = t; topts = create_trace_option_files(current_trace); if (current_trace->use_max_tr) { ret = ring_buffer_resize(max_tr.buffer, global_trace.entries); if (ret < 0) goto out; max_tr.entries = global_trace.entries; } if (t->init) { ret = tracer_init(t, tr); if (ret) goto out; } current_trace->enabled = true; trace_branch_enable(tr); out: mutex_unlock(&trace_types_lock); return ret; } static ssize_t tracing_set_trace_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[MAX_TRACER_SIZE+1]; int i; size_t ret; int err; ret = cnt; if (cnt > MAX_TRACER_SIZE) cnt = MAX_TRACER_SIZE; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; /* strip ending whitespace. */ for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) buf[i] = 0; err = tracing_set_tracer(buf); if (err) return err; *ppos += ret; return ret; } static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long *ptr = filp->private_data; char buf[64]; int r; r = snprintf(buf, sizeof(buf), "%ld\n", *ptr == (unsigned long)-1 ? 
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long *ptr = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; *ptr = val * 1000; return cnt; } static int tracing_open_pipe(struct inode *inode, struct file *filp) { long cpu_file = (long) inode->i_private; struct trace_iterator *iter; int ret = 0; if (tracing_disabled) return -ENODEV; mutex_lock(&trace_types_lock); /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { ret = -ENOMEM; goto out; } /* * We make a copy of the current tracer to avoid concurrent * changes on it while we are reading. */ iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); if (!iter->trace) { ret = -ENOMEM; goto fail; } if (current_trace) *iter->trace = *current_trace; if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { ret = -ENOMEM; goto fail; } /* trace pipe does not show start of buffer */ cpumask_setall(iter->started); if (trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; iter->cpu_file = cpu_file; iter->tr = &global_trace; mutex_init(&iter->mutex); filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); nonseekable_open(inode, filp); out: mutex_unlock(&trace_types_lock); return ret; fail: kfree(iter->trace); kfree(iter); mutex_unlock(&trace_types_lock); return ret; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; mutex_lock(&trace_types_lock); if (iter->trace->pipe_close) iter->trace->pipe_close(iter); mutex_unlock(&trace_types_lock); free_cpumask_var(iter->started); mutex_destroy(&iter->mutex); kfree(iter->trace); kfree(iter); return 0; } static unsigned int 
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	if (trace_flags & TRACE_ITER_BLOCK) {
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	} else {
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;
		poll_wait(filp, &trace_wait, poll_table);
		if (!trace_empty(iter))
			return POLLIN | POLLRDNORM;

		return 0;
	}
}

/* default reader wait: sleep on trace_wait until data shows up */
void default_wait_pipe(struct trace_iterator *iter)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);

	/* re-check after queueing ourselves, to avoid a lost wakeup */
	if (trace_empty(iter))
		schedule();

	finish_wait(&trace_wait, &wait);
}

/*
 * This is a make-shift waitqueue.
 * A tracer might use this callback on some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers, trace all functions, we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 * Anyway, this is really very primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}

/* Must be called with trace_types_lock mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/* drop iter->mutex across the sleep so writers can progress */
		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_enabled && iter->pos)
			break;
	}

	return 1;
}

/*
 * Consumer reader.
*/ static ssize_t tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; static struct tracer *old_tracer; ssize_t sret; /* return any leftover data */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (sret != -EBUSY) return sret; trace_seq_init(&iter->seq); /* copy the tracer to avoid using a global lock all around */ mutex_lock(&trace_types_lock); if (unlikely(old_tracer != current_trace && current_trace)) { old_tracer = current_trace; *iter->trace = *current_trace; } mutex_unlock(&trace_types_lock); /* * Avoid more than one consumer on a single file descriptor * This is just a matter of traces coherency, the ring buffer itself * is protected. */ mutex_lock(&iter->mutex); if (iter->trace->read) { sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); if (sret) goto out; } waitagain: sret = tracing_wait_pipe(filp); if (sret <= 0) goto out; /* stop when tracing is finished */ if (trace_empty(iter)) { sret = 0; goto out; } if (cnt >= PAGE_SIZE) cnt = PAGE_SIZE - 1; /* reset all but tr, trace, and overruns */ memset(&iter->seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); iter->pos = -1; trace_event_read_lock(); trace_access_lock(iter->cpu_file); while (trace_find_next_entry_inc(iter) != NULL) { enum print_line_t ret; int len = iter->seq.len; ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { /* don't print partial lines */ iter->seq.len = len; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); if (iter->seq.len >= cnt) break; /* * Setting the full flag means we reached the trace_seq buffer * size and we should leave by partial output condition above. * One of the trace_seq_* functions is not used properly. 
*/ WARN_ONCE(iter->seq.full, "full flag set for trace type %d", iter->ent->type); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); /* Now copy what we have to the user */ sret = trace_seq_to_user(&iter->seq, ubuf, cnt); if (iter->seq.readpos >= iter->seq.len) trace_seq_init(&iter->seq); /* * If there was nothing to send to user, in spite of consuming trace * entries, go back to wait for more entries. */ if (sret == -EBUSY) goto waitagain; out: mutex_unlock(&iter->mutex); return sret; } static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { __free_page(buf->page); } static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, unsigned int idx) { __free_page(spd->pages[idx]); } static const struct pipe_buf_operations tracing_pipe_buf_ops = { .can_merge = 0, .map = generic_pipe_buf_map, .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = tracing_pipe_buf_release, .steal = generic_pipe_buf_steal, .get = generic_pipe_buf_get, }; static size_t tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) { size_t count; int ret; /* Seq buffer is page-sized, exactly what we need. */ for (;;) { count = iter->seq.len; ret = print_trace_line(iter); count = iter->seq.len - count; if (rem < count) { rem = 0; iter->seq.len -= count; break; } if (ret == TRACE_TYPE_PARTIAL_LINE) { iter->seq.len -= count; break; } if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(iter); rem -= count; if (!trace_find_next_entry_inc(iter)) { rem = 0; iter->ent = NULL; break; } } return rem; } static ssize_t tracing_splice_read_pipe(struct file *filp, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct page *pages_def[PIPE_DEF_BUFFERS]; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct trace_iterator *iter = filp->private_data; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages = 0, /* This gets updated below. 
*/ .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &tracing_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, }; static struct tracer *old_tracer; ssize_t ret; size_t rem; unsigned int i; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; /* copy the tracer to avoid using a global lock all around */ mutex_lock(&trace_types_lock); if (unlikely(old_tracer != current_trace && current_trace)) { old_tracer = current_trace; *iter->trace = *current_trace; } mutex_unlock(&trace_types_lock); mutex_lock(&iter->mutex); if (iter->trace->splice_read) { ret = iter->trace->splice_read(iter, filp, ppos, pipe, len, flags); if (ret) goto out_err; } ret = tracing_wait_pipe(filp); if (ret <= 0) goto out_err; if (!iter->ent && !trace_find_next_entry_inc(iter)) { ret = -EFAULT; goto out_err; } trace_event_read_lock(); trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. */ for (i = 0, rem = len; i < pipe->buffers && rem; i++) { spd.pages[i] = alloc_page(GFP_KERNEL); if (!spd.pages[i]) break; rem = tracing_fill_pipe_page(rem, iter); /* Copy the data into the page, so we can start over. 
*/ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), iter->seq.len); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; spd.partial[i].len = iter->seq.len; trace_seq_init(&iter->seq); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); mutex_unlock(&iter->mutex); spd.nr_pages = i; ret = splice_to_pipe(pipe, &spd); out: splice_shrink_spd(&spd); return ret; out_err: mutex_unlock(&iter->mutex); goto out; } static ssize_t tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[96]; int r; mutex_lock(&trace_types_lock); if (!ring_buffer_expanded) r = sprintf(buf, "%lu (expanded: %lu)\n", tr->entries >> 10, trace_buf_size >> 10); else r = sprintf(buf, "%lu\n", tr->entries >> 10); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_entries_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry */ if (!val) return -EINVAL; /* value is in KB */ val <<= 10; ret = tracing_resize_ring_buffer(val); if (ret < 0) return ret; *ppos += cnt; return cnt; } static ssize_t tracing_total_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r, cpu; unsigned long size = 0, expanded_size = 0; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { size += tr->entries >> 10; if (!ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } if (ring_buffer_expanded) r = sprintf(buf, "%lu\n", size); else r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_free_buffer_write(struct file *filp, const char __user *ubuf, size_t cnt, 
loff_t *ppos) { /* * There is no need to read what the user has written, this function * is just to make sure that there is no error when "echo" is used */ *ppos += cnt; return cnt; } static int tracing_free_buffer_release(struct inode *inode, struct file *filp) { /* disable tracing ? */ if (trace_flags & TRACE_ITER_STOP_ON_FREE) tracing_off(); /* resize the ring buffer to 0 */ tracing_resize_ring_buffer(0); return 0; } static ssize_t tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { unsigned long addr = (unsigned long)ubuf; struct ring_buffer_event *event; struct ring_buffer *buffer; struct print_entry *entry; unsigned long irq_flags; struct page *pages[2]; int nr_pages = 1; ssize_t written; void *page1; void *page2 = NULL; int offset; int size; int len; int ret; if (tracing_disabled) return -EINVAL; if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; /* * Userspace is injecting traces into the kernel trace buffer. * We want to be as non intrusive as possible. * To do so, we do not want to allocate any special buffers * or take any locks, but instead write the userspace data * straight into the ring buffer. * * First we need to pin the userspace buffer into memory, * which, most likely it is, because it just referenced it. * But there's no guarantee that it is. By using get_user_pages_fast() * and kmap_atomic/kunmap_atomic() we can get access to the * pages directly. We then write the data directly into the * ring buffer. 
*/ BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE); /* check if we cross pages */ if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK)) nr_pages = 2; offset = addr & (PAGE_SIZE - 1); addr &= PAGE_MASK; ret = get_user_pages_fast(addr, nr_pages, 0, pages); if (ret < nr_pages) { while (--ret >= 0) put_page(pages[ret]); written = -EFAULT; goto out; } page1 = kmap_atomic(pages[0]); if (nr_pages == 2) page2 = kmap_atomic(pages[1]); local_save_flags(irq_flags); size = sizeof(*entry) + cnt + 2; /* possible \n added */ buffer = global_trace.buffer; event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, irq_flags, preempt_count()); if (!event) { /* Ring buffer disabled, return as if not open for write */ written = -EBADF; goto out_unlock; } entry = ring_buffer_event_data(event); entry->ip = _THIS_IP_; if (nr_pages == 2) { len = PAGE_SIZE - offset; memcpy(&entry->buf, page1 + offset, len); memcpy(&entry->buf[len], page2, cnt - len); } else memcpy(&entry->buf, page1 + offset, cnt); if (entry->buf[cnt - 1] != '\n') { entry->buf[cnt] = '\n'; entry->buf[cnt + 1] = '\0'; stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2); } else { entry->buf[cnt] = '\0'; stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1); } ring_buffer_unlock_commit(buffer, event); written = cnt; *fpos += written; out_unlock: if (nr_pages == 2) kunmap_atomic(page2); kunmap_atomic(page1); while (nr_pages > 0) put_page(pages[--nr_pages]); out: return written; } static int tracing_clock_show(struct seq_file *m, void *v) { int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) seq_printf(m, "%s%s%s%s", i ? " " : "", i == trace_clock_id ? "[" : "", trace_clocks[i].name, i == trace_clock_id ? 
"]" : ""); seq_putc(m, '\n'); return 0; } static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { char buf[64]; const char *clockstr; int i; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; clockstr = strstrip(buf); for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { if (strcmp(trace_clocks[i].name, clockstr) == 0) break; } if (i == ARRAY_SIZE(trace_clocks)) return -EINVAL; trace_clock_id = i; mutex_lock(&trace_types_lock); ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func); if (max_tr.buffer) ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func); mutex_unlock(&trace_types_lock); *fpos += cnt; return cnt; } static int tracing_clock_open(struct inode *inode, struct file *file) { if (tracing_disabled) return -ENODEV; return single_open(file, tracing_clock_show, NULL); } static const struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic, .read = tracing_max_lat_read, .write = tracing_max_lat_write, .llseek = generic_file_llseek, }; static const struct file_operations tracing_ctrl_fops = { .open = tracing_open_generic, .read = tracing_ctrl_read, .write = tracing_ctrl_write, .llseek = generic_file_llseek, }; static const struct file_operations set_tracer_fops = { .open = tracing_open_generic, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, }; static const struct file_operations tracing_pipe_fops = { .open = tracing_open_pipe, .poll = tracing_poll_pipe, .read = tracing_read_pipe, .splice_read = tracing_splice_read_pipe, .release = tracing_release_pipe, .llseek = no_llseek, }; static const struct file_operations tracing_entries_fops = { .open = tracing_open_generic, .read = tracing_entries_read, .write = tracing_entries_write, .llseek = generic_file_llseek, }; static const struct file_operations tracing_total_entries_fops = { .open = tracing_open_generic, .read = 
tracing_total_entries_read, .llseek = generic_file_llseek, }; static const struct file_operations tracing_free_buffer_fops = { .write = tracing_free_buffer_write, .release = tracing_free_buffer_release, }; static const struct file_operations tracing_mark_fops = { .open = tracing_open_generic, .write = tracing_mark_write, .llseek = generic_file_llseek, }; static const struct file_operations trace_clock_fops = { .open = tracing_clock_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = tracing_clock_write, }; struct ftrace_buffer_info { struct trace_array *tr; void *spare; int cpu; unsigned int read; }; static int tracing_buffers_open(struct inode *inode, struct file *filp) { int cpu = (int)(long)inode->i_private; struct ftrace_buffer_info *info; if (tracing_disabled) return -ENODEV; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->tr = &global_trace; info->cpu = cpu; info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; filp->private_data = info; return nonseekable_open(inode, filp); } static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct ftrace_buffer_info *info = filp->private_data; ssize_t ret; size_t size; if (!count) return 0; if (!info->spare) info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu); if (!info->spare) return -ENOMEM; /* Do we have previous read data to read? 
*/ if (info->read < PAGE_SIZE) goto read; trace_access_lock(info->cpu); ret = ring_buffer_read_page(info->tr->buffer, &info->spare, count, info->cpu, 0); trace_access_unlock(info->cpu); if (ret < 0) return 0; info->read = 0; read: size = PAGE_SIZE - info->read; if (size > count) size = count; ret = copy_to_user(ubuf, info->spare + info->read, size); if (ret == size) return -EFAULT; size -= ret; *ppos += size; info->read += size; return size; } static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; if (info->spare) ring_buffer_free_read_page(info->tr->buffer, info->spare); kfree(info); return 0; } struct buffer_ref { struct ring_buffer *buffer; void *page; int ref; }; static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->page); kfree(ref); buf->private = 0; } static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { return 1; } static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; ref->ref++; } /* Pipe buffer operations for a buffer. */ static const struct pipe_buf_operations buffer_pipe_buf_ops = { .can_merge = 0, .map = generic_pipe_buf_map, .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = buffer_pipe_buf_release, .steal = buffer_pipe_buf_steal, .get = buffer_pipe_buf_get, }; /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. 
*/ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) { struct buffer_ref *ref = (struct buffer_ref *)spd->partial[i].private; if (--ref->ref) return; ring_buffer_free_read_page(ref->buffer, ref->page); kfree(ref); spd->partial[i].private = 0; } static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct ftrace_buffer_info *info = file->private_data; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct page *pages_def[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &buffer_pipe_buf_ops, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; int entries, size, i; size_t ret; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; if (*ppos & (PAGE_SIZE - 1)) { WARN_ONCE(1, "Ftrace: previous read must page-align\n"); ret = -EINVAL; goto out; } if (len & (PAGE_SIZE - 1)) { WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); if (len < PAGE_SIZE) { ret = -EINVAL; goto out; } len &= PAGE_MASK; } trace_access_lock(info->cpu); entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { struct page *page; int r; ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!ref) break; ref->ref = 1; ref->buffer = info->tr->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu); if (!ref->page) { kfree(ref); break; } r = ring_buffer_read_page(ref->buffer, &ref->page, len, info->cpu, 1); if (r < 0) { ring_buffer_free_read_page(ref->buffer, ref->page); kfree(ref); break; } /* * zero out any left over data, this is going to * user land. 
*/ size = ring_buffer_page_len(ref->page); if (size < PAGE_SIZE) memset(ref->page + size, 0, PAGE_SIZE - size); page = virt_to_page(ref->page); spd.pages[i] = page; spd.partial[i].len = PAGE_SIZE; spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; *ppos += PAGE_SIZE; entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu); } trace_access_unlock(info->cpu); spd.nr_pages = i; /* did we read anything? */ if (!spd.nr_pages) { if (flags & SPLICE_F_NONBLOCK) ret = -EAGAIN; else ret = 0; /* TODO: block */ goto out; } ret = splice_to_pipe(pipe, &spd); splice_shrink_spd(&spd); out: return ret; } static const struct file_operations tracing_buffers_fops = { .open = tracing_buffers_open, .read = tracing_buffers_read, .release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, .llseek = no_llseek, }; static ssize_t tracing_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { unsigned long cpu = (unsigned long)filp->private_data; struct trace_array *tr = &global_trace; struct trace_seq *s; unsigned long cnt; unsigned long long t; unsigned long usec_rem; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; trace_seq_init(s); cnt = ring_buffer_entries_cpu(tr->buffer, cpu); trace_seq_printf(s, "entries: %ld\n", cnt); cnt = ring_buffer_overrun_cpu(tr->buffer, cpu); trace_seq_printf(s, "overrun: %ld\n", cnt); cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu); trace_seq_printf(s, "commit overrun: %ld\n", cnt); cnt = ring_buffer_bytes_cpu(tr->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); kfree(s); 
return count; } static const struct file_operations tracing_stats_fops = { .open = tracing_open_generic, .read = tracing_stats_read, .llseek = generic_file_llseek, }; #ifdef CONFIG_DYNAMIC_FTRACE int __weak ftrace_arch_read_dyn_info(char *buf, int size) { return 0; } static ssize_t tracing_read_dyn_info(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { static char ftrace_dyn_info_buffer[1024]; static DEFINE_MUTEX(dyn_info_mutex); unsigned long *p = filp->private_data; char *buf = ftrace_dyn_info_buffer; int size = ARRAY_SIZE(ftrace_dyn_info_buffer); int r; mutex_lock(&dyn_info_mutex); r = sprintf(buf, "%ld ", *p); r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); buf[r++] = '\n'; r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); mutex_unlock(&dyn_info_mutex); return r; } static const struct file_operations tracing_dyn_info_fops = { .open = tracing_open_generic, .read = tracing_read_dyn_info, .llseek = generic_file_llseek, }; #endif static struct dentry *d_tracer; struct dentry *tracing_init_dentry(void) { static int once; if (d_tracer) return d_tracer; if (!debugfs_initialized()) return NULL; d_tracer = debugfs_create_dir("tracing", NULL); if (!d_tracer && !once) { once = 1; pr_warning("Could not create debugfs directory 'tracing'\n"); return NULL; } return d_tracer; } static struct dentry *d_percpu; struct dentry *tracing_dentry_percpu(void) { static int once; struct dentry *d_tracer; if (d_percpu) return d_percpu; d_tracer = tracing_init_dentry(); if (!d_tracer) return NULL; d_percpu = debugfs_create_dir("per_cpu", d_tracer); if (!d_percpu && !once) { once = 1; pr_warning("Could not create debugfs directory 'per_cpu'\n"); return NULL; } return d_percpu; } static void tracing_init_debugfs_percpu(long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(); struct dentry *d_cpu; char cpu_dir[30]; /* 30 characters should be more than enough */ snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = debugfs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { 
pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); return; } /* per cpu trace_pipe */ trace_create_file("trace_pipe", 0444, d_cpu, (void *) cpu, &tracing_pipe_fops); /* per cpu trace */ trace_create_file("trace", 0644, d_cpu, (void *) cpu, &tracing_fops); trace_create_file("trace_pipe_raw", 0444, d_cpu, (void *) cpu, &tracing_buffers_fops); trace_create_file("stats", 0444, d_cpu, (void *) cpu, &tracing_stats_fops); } #ifdef CONFIG_FTRACE_SELFTEST /* Let selftest have access to static functions in this file */ #include "trace_selftest.c" #endif struct trace_option_dentry { struct tracer_opt *opt; struct tracer_flags *flags; struct dentry *entry; }; static ssize_t trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; char *buf; if (topt->flags->val & topt->opt->bit) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; if (!!(topt->flags->val & topt->opt->bit) != val) { mutex_lock(&trace_types_lock); ret = __set_tracer_option(current_trace, topt->flags, topt->opt, !val); mutex_unlock(&trace_types_lock); if (ret) return ret; } *ppos += cnt; return cnt; } static const struct file_operations trace_options_fops = { .open = tracing_open_generic, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, }; static ssize_t trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { long index = (long)filp->private_data; char *buf; if (trace_flags & (1 << index)) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t 
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { long index = (long)filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; mutex_lock(&trace_types_lock); ret = set_tracer_flag(1 << index, val); mutex_unlock(&trace_types_lock); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations trace_options_core_fops = { .open = tracing_open_generic, .read = trace_options_core_read, .write = trace_options_core_write, .llseek = generic_file_llseek, }; struct dentry *trace_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *ret; ret = debugfs_create_file(name, mode, parent, data, fops); if (!ret) pr_warning("Could not create debugfs '%s' entry\n", name); return ret; } static struct dentry *trace_options_init_dentry(void) { struct dentry *d_tracer; static struct dentry *t_options; if (t_options) return t_options; d_tracer = tracing_init_dentry(); if (!d_tracer) return NULL; t_options = debugfs_create_dir("options", d_tracer); if (!t_options) { pr_warning("Could not create debugfs directory 'options'\n"); return NULL; } return t_options; } static void create_trace_option_file(struct trace_option_dentry *topt, struct tracer_flags *flags, struct tracer_opt *opt) { struct dentry *t_options; t_options = trace_options_init_dentry(); if (!t_options) return; topt->flags = flags; topt->opt = opt; topt->entry = trace_create_file(opt->name, 0644, t_options, topt, &trace_options_fops); } static struct trace_option_dentry * create_trace_option_files(struct tracer *tracer) { struct trace_option_dentry *topts; struct tracer_flags *flags; struct tracer_opt *opts; int cnt; if (!tracer) return NULL; flags = tracer->flags; if (!flags || !flags->opts) return NULL; opts = flags->opts; for (cnt = 0; opts[cnt].name; cnt++) ; topts = 
kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); if (!topts) return NULL; for (cnt = 0; opts[cnt].name; cnt++) create_trace_option_file(&topts[cnt], flags, &opts[cnt]); return topts; } static void destroy_trace_option_files(struct trace_option_dentry *topts) { int cnt; if (!topts) return; for (cnt = 0; topts[cnt].opt; cnt++) { if (topts[cnt].entry) debugfs_remove(topts[cnt].entry); } kfree(topts); } static struct dentry * create_trace_option_core_file(const char *option, long index) { struct dentry *t_options; t_options = trace_options_init_dentry(); if (!t_options) return NULL; return trace_create_file(option, 0644, t_options, (void *)index, &trace_options_core_fops); } static __init void create_trace_options_dir(void) { struct dentry *t_options; int i; t_options = trace_options_init_dentry(); if (!t_options) return; for (i = 0; trace_options[i]; i++) create_trace_option_core_file(trace_options[i], i); } static ssize_t rb_simple_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct ring_buffer *buffer = tr->buffer; char buf[64]; int r; if (buffer) r = ring_buffer_record_is_on(buffer); else r = 0; r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct ring_buffer *buffer = tr->buffer; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (buffer) { if (val) ring_buffer_record_on(buffer); else ring_buffer_record_off(buffer); } (*ppos)++; return cnt; } static const struct file_operations rb_simple_fops = { .open = tracing_open_generic, .read = rb_simple_read, .write = rb_simple_write, .llseek = default_llseek, }; static __init int tracer_init_debugfs(void) { struct dentry *d_tracer; int cpu; trace_access_lock_init(); d_tracer = tracing_init_dentry(); 
trace_create_file("tracing_enabled", 0644, d_tracer, &global_trace, &tracing_ctrl_fops); trace_create_file("trace_options", 0644, d_tracer, NULL, &tracing_iter_fops); trace_create_file("tracing_cpumask", 0644, d_tracer, NULL, &tracing_cpumask_fops); trace_create_file("trace", 0644, d_tracer, (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); trace_create_file("available_tracers", 0444, d_tracer, &global_trace, &show_traces_fops); trace_create_file("current_tracer", 0644, d_tracer, &global_trace, &set_tracer_fops); #ifdef CONFIG_TRACER_MAX_TRACE trace_create_file("tracing_max_latency", 0644, d_tracer, &tracing_max_latency, &tracing_max_lat_fops); #endif trace_create_file("tracing_thresh", 0644, d_tracer, &tracing_thresh, &tracing_max_lat_fops); trace_create_file("README", 0444, d_tracer, NULL, &tracing_readme_fops); trace_create_file("trace_pipe", 0444, d_tracer, (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); trace_create_file("buffer_size_kb", 0644, d_tracer, &global_trace, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", 0444, d_tracer, &global_trace, &tracing_total_entries_fops); trace_create_file("free_buffer", 0644, d_tracer, &global_trace, &tracing_free_buffer_fops); trace_create_file("trace_marker", 0220, d_tracer, NULL, &tracing_mark_fops); trace_create_file("saved_cmdlines", 0444, d_tracer, NULL, &tracing_saved_cmdlines_fops); trace_create_file("trace_clock", 0644, d_tracer, NULL, &trace_clock_fops); trace_create_file("tracing_on", 0644, d_tracer, &global_trace, &rb_simple_fops); #ifdef CONFIG_DYNAMIC_FTRACE trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, &ftrace_update_tot_cnt, &tracing_dyn_info_fops); #endif create_trace_options_dir(); for_each_tracing_cpu(cpu) tracing_init_debugfs_percpu(cpu); return 0; } static int trace_panic_handler(struct notifier_block *this, unsigned long event, void *unused) { if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); return NOTIFY_OK; } static struct notifier_block trace_panic_notifier = 
{ .notifier_call = trace_panic_handler, .next = NULL, .priority = 150 /* priority: INT_MAX >= x >= 0 */ }; static int trace_die_handler(struct notifier_block *self, unsigned long val, void *data) { switch (val) { case DIE_OOPS: if (ftrace_dump_on_oops) ftrace_dump(ftrace_dump_on_oops); break; default: break; } return NOTIFY_OK; } static struct notifier_block trace_die_notifier = { .notifier_call = trace_die_handler, .priority = 200 }; /* * printk is set to max of 1024, we really don't need it that big. * Nothing should be printing 1000 characters anyway. */ #define TRACE_MAX_PRINT 1000 /* * Define here KERN_TRACE so that we have one place to modify * it if we decide to change what log level the ftrace dump * should be at. */ #define KERN_TRACE KERN_EMERG void trace_printk_seq(struct trace_seq *s) { /* Probably should print a warning here. */ if (s->len >= 1000) s->len = 1000; /* should be zero ended, but we are paranoid. */ s->buffer[s->len] = 0; printk(KERN_TRACE "%s", s->buffer); trace_seq_init(s); } void trace_init_global_iter(struct trace_iterator *iter) { iter->tr = &global_trace; iter->trace = current_trace; iter->cpu_file = TRACE_PIPE_ALL_CPU; } static void __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) { static arch_spinlock_t ftrace_dump_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; static int dump_ran; unsigned long flags; int cnt = 0, cpu; /* only one dump */ local_irq_save(flags); arch_spin_lock(&ftrace_dump_lock); if (dump_ran) goto out; dump_ran = 1; tracing_off(); /* Did function tracer already get disabled? 
*/ if (ftrace_is_dead()) { printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); printk("# MAY BE MISSING FUNCTION EVENTS\n"); } if (disable_tracing) ftrace_kill(); trace_init_global_iter(&iter); for_each_tracing_cpu(cpu) { atomic_inc(&iter.tr->data[cpu]->disabled); } old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ; /* don't look at user memory in panic mode */ trace_flags &= ~TRACE_ITER_SYM_USEROBJ; /* Simulate the iterator */ iter.tr = &global_trace; iter.trace = current_trace; switch (oops_dump_mode) { case DUMP_ALL: iter.cpu_file = TRACE_PIPE_ALL_CPU; break; case DUMP_ORIG: iter.cpu_file = raw_smp_processor_id(); break; case DUMP_NONE: goto out_enable; default: printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); iter.cpu_file = TRACE_PIPE_ALL_CPU; } printk(KERN_TRACE "Dumping ftrace buffer:\n"); /* * We need to stop all tracing on all CPUS to read the * the next buffer. This is a bit expensive, but is * not done often. We fill all what we can read, * and then release the locks again. 
*/ while (!trace_empty(&iter)) { if (!cnt) printk(KERN_TRACE "---------------------------------\n"); cnt++; /* reset all but tr, trace, and overruns */ memset(&iter.seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); iter.iter_flags |= TRACE_FILE_LAT_FMT; iter.pos = -1; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; ret = print_trace_line(&iter); if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(&iter); } touch_nmi_watchdog(); trace_printk_seq(&iter.seq); } if (!cnt) printk(KERN_TRACE " (ftrace buffer empty)\n"); else printk(KERN_TRACE "---------------------------------\n"); out_enable: /* Re-enable tracing if requested */ if (!disable_tracing) { trace_flags |= old_userobj; for_each_tracing_cpu(cpu) { atomic_dec(&iter.tr->data[cpu]->disabled); } tracing_on(); } out: arch_spin_unlock(&ftrace_dump_lock); local_irq_restore(flags); } /* By default: disable tracing after the dump */ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { __ftrace_dump(true, oops_dump_mode); } EXPORT_SYMBOL_GPL(ftrace_dump); __init static int tracer_alloc_buffers(void) { int ring_buf_size; enum ring_buffer_flags rb_flags; int i; int ret = -ENOMEM; if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; /* To save memory, keep the ring buffer size to its minimum */ if (ring_buffer_expanded) ring_buf_size = trace_buf_size; else ring_buf_size = 1; rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? 
RB_FL_OVERWRITE : 0; cpumask_copy(tracing_buffer_mask, cpu_possible_mask); cpumask_copy(tracing_cpumask, cpu_all_mask); /* TODO: make the number of buffers hot pluggable with CPUS */ global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags); if (!global_trace.buffer) { printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); WARN_ON(1); goto out_free_cpumask; } global_trace.entries = ring_buffer_size(global_trace.buffer); if (global_trace.buffer_disabled) tracing_off(); #ifdef CONFIG_TRACER_MAX_TRACE max_tr.buffer = ring_buffer_alloc(1, rb_flags); if (!max_tr.buffer) { printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); WARN_ON(1); ring_buffer_free(global_trace.buffer); goto out_free_cpumask; } max_tr.entries = 1; #endif /* Allocate the first page for all buffers */ for_each_tracing_cpu(i) { global_trace.data[i] = &per_cpu(global_trace_cpu, i); max_tr.data[i] = &per_cpu(max_tr_data, i); } trace_init_cmdlines(); register_tracer(&nop_trace); current_trace = &nop_trace; /* All seems OK, enable tracing */ tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); return 0; out_free_cpumask: free_cpumask_var(tracing_cpumask); out_free_buffer_mask: free_cpumask_var(tracing_buffer_mask); out: return ret; } __init static int clear_boot_tracer(void) { /* * The default tracer at boot buffer is an init section. * This function is called in lateinit. If we did not * find the boot tracer, then clear it out, to prevent * later registration from accessing the buffer that is * about to be freed. */ if (!default_bootup_tracer) return 0; printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", default_bootup_tracer); default_bootup_tracer = NULL; return 0; } early_initcall(tracer_alloc_buffers); fs_initcall(tracer_init_debugfs); late_initcall(clear_boot_tracer);
gpl-2.0
VanirAOSP/kernel_amlogic_mbx12122012
drivers/misc/mpu3050/mlsl-kernel.c
20
9649
/* $License: Copyright (C) 2010 InvenSense Corporation, All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. $ */ #include "mlsl.h" #include "mpu-i2c.h" /* ------------ */ /* - Defines. - */ /* ------------ */ /* ---------------------- */ /* - Types definitions. - */ /* ---------------------- */ /* --------------------- */ /* - Function p-types. - */ /* --------------------- */ /** * @brief used to open the I2C or SPI serial port. * This port is used to send and receive data to the MPU device. * @param portNum * The COM port number associated with the device in use. * @return ML_SUCCESS if successful, a non-zero error code otherwise. */ tMLError MLSLSerialOpen(char const *port, void ** sl_handle) { return ML_SUCCESS; } /** * @brief used to reset any buffering the driver may be doing * @return ML_SUCCESS if successful, a non-zero error code otherwise. */ tMLError MLSLSerialReset(void *sl_handle) { return ML_SUCCESS; } /** * @brief used to close the I2C or SPI serial port. * This port is used to send and receive data to the MPU device. * @return ML_SUCCESS if successful, a non-zero error code otherwise. */ tMLError MLSLSerialClose(void *sl_handle) { return ML_SUCCESS; } /** * @brief used to read a single byte of data. * This should be sent by I2C or SPI. * * @param slaveAddr I2C slave address of device. * @param registerAddr Register address to read. * @param data Single byte of data to read. 
* * @return ML_SUCCESS if the command is successful, an error code otherwise. */ tMLError MLSLSerialWriteSingle(void *sl_handle, unsigned char slaveAddr, unsigned char registerAddr, unsigned char data) { return sensor_i2c_write_register((struct i2c_adapter *) sl_handle, slaveAddr, registerAddr, data); } /** * @brief used to write multiple bytes of data from registers. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param registerAddr Register address to write. * @param length Length of burst of data. * @param data Pointer to block of data. * * @return ML_SUCCESS if successful, a non-zero error code otherwise. */ tMLError MLSLSerialWrite(void *sl_handle, unsigned char slaveAddr, unsigned short length, unsigned char const *data) { tMLError result; const unsigned short dataLength = length - 1; const unsigned char startRegAddr = data[0]; unsigned char i2cWrite[SERIAL_MAX_TRANSFER_SIZE + 1]; unsigned short bytesWritten = 0; while (bytesWritten < dataLength) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, dataLength - bytesWritten); if (bytesWritten == 0) { result = sensor_i2c_write((struct i2c_adapter *) sl_handle, slaveAddr, 1 + thisLen, data); } else { /* manually increment register addr between chunks */ i2cWrite[0] = startRegAddr + bytesWritten; memcpy(&i2cWrite[1], &data[1 + bytesWritten], thisLen); result = sensor_i2c_write((struct i2c_adapter *) sl_handle, slaveAddr, 1 + thisLen, i2cWrite); } if (ML_SUCCESS != result) return result; bytesWritten += thisLen; } return ML_SUCCESS; } /** * @brief used to read multiple bytes of data from registers. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param registerAddr Register address to read. * @param length Length of burst of data. * @param data Pointer to block of data. 
* * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialRead(void *sl_handle, unsigned char slaveAddr, unsigned char registerAddr, unsigned short length, unsigned char *data) { tMLError result; unsigned short bytesRead = 0; if (registerAddr == MPUREG_FIFO_R_W || registerAddr == MPUREG_MEM_R_W) { return ML_ERROR_INVALID_PARAMETER; } while (bytesRead < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead); result = sensor_i2c_read((struct i2c_adapter *) sl_handle, slaveAddr, registerAddr + bytesRead, thisLen, &data[bytesRead]); if (ML_SUCCESS != result) return result; bytesRead += thisLen; } return ML_SUCCESS; } /** * @brief used to write multiple bytes of data to the memory. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param memAddr The location in the memory to write to. * @param length Length of burst data. * @param data Pointer to block of data. * * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialWriteMem(void *sl_handle, unsigned char slaveAddr, unsigned short memAddr, unsigned short length, unsigned char const *data) { tMLError result; unsigned short bytesWritten = 0; if ((memAddr & 0xFF) + length > MPU_MEM_BANK_SIZE) { printk ("memory read length (%d B) extends beyond its limits (%d) " "if started at location %d\n", length, MPU_MEM_BANK_SIZE, memAddr & 0xFF); return ML_ERROR_INVALID_PARAMETER; } while (bytesWritten < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesWritten); result = mpu_memory_write((struct i2c_adapter *) sl_handle, slaveAddr, memAddr + bytesWritten, thisLen, &data[bytesWritten]); if (ML_SUCCESS != result) return result; bytesWritten += thisLen; } return ML_SUCCESS; } /** * @brief used to read multiple bytes of data from the memory. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param memAddr The location in the memory to read from. 
* @param length Length of burst data. * @param data Pointer to block of data. * * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialReadMem(void *sl_handle, unsigned char slaveAddr, unsigned short memAddr, unsigned short length, unsigned char *data) { tMLError result; unsigned short bytesRead = 0; if ((memAddr & 0xFF) + length > MPU_MEM_BANK_SIZE) { printk ("memory read length (%d B) extends beyond its limits (%d) " "if started at location %d\n", length, MPU_MEM_BANK_SIZE, memAddr & 0xFF); return ML_ERROR_INVALID_PARAMETER; } while (bytesRead < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead); result = mpu_memory_read((struct i2c_adapter *) sl_handle, slaveAddr, memAddr + bytesRead, thisLen, &data[bytesRead]); if (ML_SUCCESS != result) return result; bytesRead += thisLen; } return ML_SUCCESS; } /** * @brief used to write multiple bytes of data to the fifo. * This should be sent by I2C. * * @param slaveAddr I2C slave address of device. * @param length Length of burst of data. * @param data Pointer to block of data. * * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialWriteFifo(void *sl_handle, unsigned char slaveAddr, unsigned short length, unsigned char const *data) { tMLError result; unsigned char i2cWrite[SERIAL_MAX_TRANSFER_SIZE + 1]; unsigned short bytesWritten = 0; if (length > FIFO_HW_SIZE) { printk(KERN_ERR "maximum fifo write length is %d\n", FIFO_HW_SIZE); return ML_ERROR_INVALID_PARAMETER; } while (bytesWritten < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesWritten); i2cWrite[0] = MPUREG_FIFO_R_W; memcpy(&i2cWrite[1], &data[bytesWritten], thisLen); result = sensor_i2c_write((struct i2c_adapter *) sl_handle, slaveAddr, thisLen + 1, i2cWrite); if (ML_SUCCESS != result) return result; bytesWritten += thisLen; } return ML_SUCCESS; } /** * @brief used to read multiple bytes of data from the fifo. * This should be sent by I2C. 
* * @param slaveAddr I2C slave address of device. * @param length Length of burst of data. * @param data Pointer to block of data. * * @return Zero if successful; an error code otherwise */ tMLError MLSLSerialReadFifo(void *sl_handle, unsigned char slaveAddr, unsigned short length, unsigned char *data) { tMLError result; unsigned short bytesRead = 0; if (length > FIFO_HW_SIZE) { printk(KERN_ERR "maximum fifo read length is %d\n", FIFO_HW_SIZE); return ML_ERROR_INVALID_PARAMETER; } while (bytesRead < length) { unsigned short thisLen = min(SERIAL_MAX_TRANSFER_SIZE, length - bytesRead); result = sensor_i2c_read((struct i2c_adapter *) sl_handle, slaveAddr, MPUREG_FIFO_R_W, thisLen, &data[bytesRead]); if (ML_SUCCESS != result) return result; bytesRead += thisLen; } return ML_SUCCESS; } /** * @} */
gpl-2.0
xbmc/android
lib/libUPnP/Neptune/Source/Tests/FileTest2/FileTest2.cpp
20
3993
/***************************************************************** | | File Test Program 2 | | (c) 2005-2008 Gilles Boccon-Gibod | Author: Gilles Boccon-Gibod (bok@bok.net) | ****************************************************************/ /*---------------------------------------------------------------------- | includes +---------------------------------------------------------------------*/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include "Neptune.h" #include "NptDebug.h" /*---------------------------------------------------------------------- | FileTypeName +---------------------------------------------------------------------*/ static const char* FileTypeName(NPT_FileInfo::FileType type) { switch (type) { case NPT_FileInfo::FILE_TYPE_NONE: return "NONE"; case NPT_FileInfo::FILE_TYPE_REGULAR: return "REGULAR"; case NPT_FileInfo::FILE_TYPE_DIRECTORY: return "DIRECTORY"; case NPT_FileInfo::FILE_TYPE_SPECIAL: return "SPECIAL"; case NPT_FileInfo::FILE_TYPE_OTHER: return "OTHER"; default: return "??"; } } /*---------------------------------------------------------------------- | WalkDir +---------------------------------------------------------------------*/ static NPT_Result WalkDir(const char* path, unsigned int indent) { NPT_FileInfo info; NPT_Result result = NPT_File::GetInfo(path, &info); if (NPT_FAILED(result)) { fprintf(stderr, "ERROR: NPT_File::GetInfo(\"%s\") returned %d (%s)\n", path, result, NPT_ResultText(result)); return result; } if (info.m_Type != NPT_FileInfo::FILE_TYPE_DIRECTORY) { fprintf(stderr, "WARNING: %s is not a directory\n", path); return NPT_SUCCESS; // not fatal } NPT_List<NPT_String> entries; result = NPT_File::ListDir(path, entries); if (NPT_FAILED(result)) { fprintf(stderr, "WARNING: NPT_File::ListDirectory returned %d (%s)\n", result, NPT_ResultText(result)); return NPT_SUCCESS; // not fatal } for (NPT_List<NPT_String>::Iterator entries_iterator = entries.GetFirstItem(); entries_iterator; ++entries_iterator) { if 
(*entries_iterator == "." || *entries_iterator == "..") continue; NPT_String child = path; child += NPT_FilePath::Separator; child += *entries_iterator; result = NPT_File::GetInfo(child, &info); if (NPT_FAILED(result)) { fprintf(stderr, "WARNING: NPT_File::GetInfo(%s) returned %d (%s)\n", child.GetChars(), result, NPT_ResultText(result)); continue; } for (unsigned int i=0; i<indent; i++) { printf(" "); } printf("%s: type=%s", child.GetChars(), FileTypeName(info.m_Type)); if (info.m_Type != NPT_FileInfo::FILE_TYPE_DIRECTORY) printf(" size=%lld", info.m_Size); if (info.m_AttributesMask & NPT_FILE_ATTRIBUTE_READ_ONLY && info.m_Attributes & NPT_FILE_ATTRIBUTE_READ_ONLY) { printf(" RO"); } if (info.m_AttributesMask & NPT_FILE_ATTRIBUTE_LINK && info.m_Attributes & NPT_FILE_ATTRIBUTE_LINK) { printf(" LINK"); } printf("\n"); if (info.m_Type == NPT_FileInfo::FILE_TYPE_DIRECTORY) { result = WalkDir(child, indent+1); if (NPT_FAILED(result)) return result; } } return NPT_SUCCESS; } /*---------------------------------------------------------------------- | main +---------------------------------------------------------------------*/ int main(int argc, char** argv) { if (argc != 2) { fprintf(stderr, "ERROR: root directory name not specified\n" "usage: filetest2 <root_path>\n"); return 1; } WalkDir(argv[1], 0); return 0; }
gpl-2.0
dh-electronics/linux-am33x
drivers/net/team/team.c
20
71033
/* * drivers/net/team/team.c - Network team device driver * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/errno.h> #include <linux/ctype.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/netpoll.h> #include <linux/if_vlan.h> #include <linux/if_arp.h> #include <linux/socket.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <net/rtnetlink.h> #include <net/genetlink.h> #include <net/netlink.h> #include <net/sch_generic.h> #include <net/switchdev.h> #include <generated/utsrelease.h> #include <linux/if_team.h> #define DRV_NAME "team" /********** * Helpers **********/ #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT) static struct team_port *team_port_get_rcu(const struct net_device *dev) { return rcu_dereference(dev->rx_handler_data); } static struct team_port *team_port_get_rtnl(const struct net_device *dev) { struct team_port *port = rtnl_dereference(dev->rx_handler_data); return team_port_exists(dev) ? 
port : NULL; } /* * Since the ability to change device address for open port device is tested in * team_port_add, this function can be called without control of return value */ static int __set_port_dev_addr(struct net_device *port_dev, const unsigned char *dev_addr) { struct sockaddr addr; memcpy(addr.sa_data, dev_addr, port_dev->addr_len); addr.sa_family = port_dev->type; return dev_set_mac_address(port_dev, &addr); } static int team_port_set_orig_dev_addr(struct team_port *port) { return __set_port_dev_addr(port->dev, port->orig.dev_addr); } static int team_port_set_team_dev_addr(struct team *team, struct team_port *port) { return __set_port_dev_addr(port->dev, team->dev->dev_addr); } int team_modeop_port_enter(struct team *team, struct team_port *port) { return team_port_set_team_dev_addr(team, port); } EXPORT_SYMBOL(team_modeop_port_enter); void team_modeop_port_change_dev_addr(struct team *team, struct team_port *port) { team_port_set_team_dev_addr(team, port); } EXPORT_SYMBOL(team_modeop_port_change_dev_addr); static void team_refresh_port_linkup(struct team_port *port) { port->linkup = port->user.linkup_enabled ? 
port->user.linkup : port->state.linkup; } /******************* * Options handling *******************/ struct team_option_inst { /* One for each option instance */ struct list_head list; struct list_head tmp_list; struct team_option *option; struct team_option_inst_info info; bool changed; bool removed; }; static struct team_option *__team_find_option(struct team *team, const char *opt_name) { struct team_option *option; list_for_each_entry(option, &team->option_list, list) { if (strcmp(option->name, opt_name) == 0) return option; } return NULL; } static void __team_option_inst_del(struct team_option_inst *opt_inst) { list_del(&opt_inst->list); kfree(opt_inst); } static void __team_option_inst_del_option(struct team *team, struct team_option *option) { struct team_option_inst *opt_inst, *tmp; list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { if (opt_inst->option == option) __team_option_inst_del(opt_inst); } } static int __team_option_inst_add(struct team *team, struct team_option *option, struct team_port *port) { struct team_option_inst *opt_inst; unsigned int array_size; unsigned int i; int err; array_size = option->array_size; if (!array_size) array_size = 1; /* No array but still need one instance */ for (i = 0; i < array_size; i++) { opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL); if (!opt_inst) return -ENOMEM; opt_inst->option = option; opt_inst->info.port = port; opt_inst->info.array_index = i; opt_inst->changed = true; opt_inst->removed = false; list_add_tail(&opt_inst->list, &team->option_inst_list); if (option->init) { err = option->init(team, &opt_inst->info); if (err) return err; } } return 0; } static int __team_option_inst_add_option(struct team *team, struct team_option *option) { int err; if (!option->per_port) { err = __team_option_inst_add(team, option, NULL); if (err) goto inst_del_option; } return 0; inst_del_option: __team_option_inst_del_option(team, option); return err; } static void 
__team_option_inst_mark_removed_option(struct team *team, struct team_option *option) { struct team_option_inst *opt_inst; list_for_each_entry(opt_inst, &team->option_inst_list, list) { if (opt_inst->option == option) { opt_inst->changed = true; opt_inst->removed = true; } } } static void __team_option_inst_del_port(struct team *team, struct team_port *port) { struct team_option_inst *opt_inst, *tmp; list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { if (opt_inst->option->per_port && opt_inst->info.port == port) __team_option_inst_del(opt_inst); } } static int __team_option_inst_add_port(struct team *team, struct team_port *port) { struct team_option *option; int err; list_for_each_entry(option, &team->option_list, list) { if (!option->per_port) continue; err = __team_option_inst_add(team, option, port); if (err) goto inst_del_port; } return 0; inst_del_port: __team_option_inst_del_port(team, port); return err; } static void __team_option_inst_mark_removed_port(struct team *team, struct team_port *port) { struct team_option_inst *opt_inst; list_for_each_entry(opt_inst, &team->option_inst_list, list) { if (opt_inst->info.port == port) { opt_inst->changed = true; opt_inst->removed = true; } } } static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) { int i; struct team_option **dst_opts; int err; dst_opts = kzalloc(sizeof(struct team_option *) * option_count, GFP_KERNEL); if (!dst_opts) return -ENOMEM; for (i = 0; i < option_count; i++, option++) { if (__team_find_option(team, option->name)) { err = -EEXIST; goto alloc_rollback; } dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); if (!dst_opts[i]) { err = -ENOMEM; goto alloc_rollback; } } for (i = 0; i < option_count; i++) { err = __team_option_inst_add_option(team, dst_opts[i]); if (err) goto inst_rollback; list_add_tail(&dst_opts[i]->list, &team->option_list); } kfree(dst_opts); return 0; inst_rollback: for (i--; i >= 0; i--) 
__team_option_inst_del_option(team, dst_opts[i]); i = option_count - 1; alloc_rollback: for (i--; i >= 0; i--) kfree(dst_opts[i]); kfree(dst_opts); return err; } static void __team_options_mark_removed(struct team *team, const struct team_option *option, size_t option_count) { int i; for (i = 0; i < option_count; i++, option++) { struct team_option *del_opt; del_opt = __team_find_option(team, option->name); if (del_opt) __team_option_inst_mark_removed_option(team, del_opt); } } static void __team_options_unregister(struct team *team, const struct team_option *option, size_t option_count) { int i; for (i = 0; i < option_count; i++, option++) { struct team_option *del_opt; del_opt = __team_find_option(team, option->name); if (del_opt) { __team_option_inst_del_option(team, del_opt); list_del(&del_opt->list); kfree(del_opt); } } } static void __team_options_change_check(struct team *team); int team_options_register(struct team *team, const struct team_option *option, size_t option_count) { int err; err = __team_options_register(team, option, option_count); if (err) return err; __team_options_change_check(team); return 0; } EXPORT_SYMBOL(team_options_register); void team_options_unregister(struct team *team, const struct team_option *option, size_t option_count) { __team_options_mark_removed(team, option, option_count); __team_options_change_check(team); __team_options_unregister(team, option, option_count); } EXPORT_SYMBOL(team_options_unregister); static int team_option_get(struct team *team, struct team_option_inst *opt_inst, struct team_gsetter_ctx *ctx) { if (!opt_inst->option->getter) return -EOPNOTSUPP; return opt_inst->option->getter(team, ctx); } static int team_option_set(struct team *team, struct team_option_inst *opt_inst, struct team_gsetter_ctx *ctx) { if (!opt_inst->option->setter) return -EOPNOTSUPP; return opt_inst->option->setter(team, ctx); } void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info) { struct team_option_inst 
*opt_inst; opt_inst = container_of(opt_inst_info, struct team_option_inst, info); opt_inst->changed = true; } EXPORT_SYMBOL(team_option_inst_set_change); void team_options_change_check(struct team *team) { __team_options_change_check(team); } EXPORT_SYMBOL(team_options_change_check); /**************** * Mode handling ****************/ static LIST_HEAD(mode_list); static DEFINE_SPINLOCK(mode_list_lock); struct team_mode_item { struct list_head list; const struct team_mode *mode; }; static struct team_mode_item *__find_mode(const char *kind) { struct team_mode_item *mitem; list_for_each_entry(mitem, &mode_list, list) { if (strcmp(mitem->mode->kind, kind) == 0) return mitem; } return NULL; } static bool is_good_mode_name(const char *name) { while (*name != '\0') { if (!isalpha(*name) && !isdigit(*name) && *name != '_') return false; name++; } return true; } int team_mode_register(const struct team_mode *mode) { int err = 0; struct team_mode_item *mitem; if (!is_good_mode_name(mode->kind) || mode->priv_size > TEAM_MODE_PRIV_SIZE) return -EINVAL; mitem = kmalloc(sizeof(*mitem), GFP_KERNEL); if (!mitem) return -ENOMEM; spin_lock(&mode_list_lock); if (__find_mode(mode->kind)) { err = -EEXIST; kfree(mitem); goto unlock; } mitem->mode = mode; list_add_tail(&mitem->list, &mode_list); unlock: spin_unlock(&mode_list_lock); return err; } EXPORT_SYMBOL(team_mode_register); void team_mode_unregister(const struct team_mode *mode) { struct team_mode_item *mitem; spin_lock(&mode_list_lock); mitem = __find_mode(mode->kind); if (mitem) { list_del_init(&mitem->list); kfree(mitem); } spin_unlock(&mode_list_lock); } EXPORT_SYMBOL(team_mode_unregister); static const struct team_mode *team_mode_get(const char *kind) { struct team_mode_item *mitem; const struct team_mode *mode = NULL; spin_lock(&mode_list_lock); mitem = __find_mode(kind); if (!mitem) { spin_unlock(&mode_list_lock); request_module("team-mode-%s", kind); spin_lock(&mode_list_lock); mitem = __find_mode(kind); } if (mitem) { 
mode = mitem->mode; if (!try_module_get(mode->owner)) mode = NULL; } spin_unlock(&mode_list_lock); return mode; } static void team_mode_put(const struct team_mode *mode) { module_put(mode->owner); } static bool team_dummy_transmit(struct team *team, struct sk_buff *skb) { dev_kfree_skb_any(skb); return false; } static rx_handler_result_t team_dummy_receive(struct team *team, struct team_port *port, struct sk_buff *skb) { return RX_HANDLER_ANOTHER; } static const struct team_mode __team_no_mode = { .kind = "*NOMODE*", }; static bool team_is_mode_set(struct team *team) { return team->mode != &__team_no_mode; } static void team_set_no_mode(struct team *team) { team->user_carrier_enabled = false; team->mode = &__team_no_mode; } static void team_adjust_ops(struct team *team) { /* * To avoid checks in rx/tx skb paths, ensure here that non-null and * correct ops are always set. */ if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->transmit) team->ops.transmit = team_dummy_transmit; else team->ops.transmit = team->mode->ops->transmit; if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->receive) team->ops.receive = team_dummy_receive; else team->ops.receive = team->mode->ops->receive; } /* * We can benefit from the fact that it's ensured no port is present * at the time of mode change. Therefore no packets are in fly so there's no * need to set mode operations in any special way. 
*/ static int __team_change_mode(struct team *team, const struct team_mode *new_mode) { /* Check if mode was previously set and do cleanup if so */ if (team_is_mode_set(team)) { void (*exit_op)(struct team *team) = team->ops.exit; /* Clear ops area so no callback is called any longer */ memset(&team->ops, 0, sizeof(struct team_mode_ops)); team_adjust_ops(team); if (exit_op) exit_op(team); team_mode_put(team->mode); team_set_no_mode(team); /* zero private data area */ memset(&team->mode_priv, 0, sizeof(struct team) - offsetof(struct team, mode_priv)); } if (!new_mode) return 0; if (new_mode->ops->init) { int err; err = new_mode->ops->init(team); if (err) return err; } team->mode = new_mode; memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops)); team_adjust_ops(team); return 0; } static int team_change_mode(struct team *team, const char *kind) { const struct team_mode *new_mode; struct net_device *dev = team->dev; int err; if (!list_empty(&team->port_list)) { netdev_err(dev, "No ports can be present during mode change\n"); return -EBUSY; } if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) { netdev_err(dev, "Unable to change to the same mode the team is in\n"); return -EINVAL; } new_mode = team_mode_get(kind); if (!new_mode) { netdev_err(dev, "Mode \"%s\" not found\n", kind); return -EINVAL; } err = __team_change_mode(team, new_mode); if (err) { netdev_err(dev, "Failed to change to mode \"%s\"\n", kind); team_mode_put(new_mode); return err; } netdev_info(dev, "Mode changed to \"%s\"\n", kind); return 0; } /********************* * Peers notification *********************/ static void team_notify_peers_work(struct work_struct *work) { struct team *team; int val; team = container_of(work, struct team, notify_peers.dw.work); if (!rtnl_trylock()) { schedule_delayed_work(&team->notify_peers.dw, 0); return; } val = atomic_dec_if_positive(&team->notify_peers.count_pending); if (val < 0) { rtnl_unlock(); return; } 
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); rtnl_unlock(); if (val) schedule_delayed_work(&team->notify_peers.dw, msecs_to_jiffies(team->notify_peers.interval)); } static void team_notify_peers(struct team *team) { if (!team->notify_peers.count || !netif_running(team->dev)) return; atomic_add(team->notify_peers.count, &team->notify_peers.count_pending); schedule_delayed_work(&team->notify_peers.dw, 0); } static void team_notify_peers_init(struct team *team) { INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work); } static void team_notify_peers_fini(struct team *team) { cancel_delayed_work_sync(&team->notify_peers.dw); } /******************************* * Send multicast group rejoins *******************************/ static void team_mcast_rejoin_work(struct work_struct *work) { struct team *team; int val; team = container_of(work, struct team, mcast_rejoin.dw.work); if (!rtnl_trylock()) { schedule_delayed_work(&team->mcast_rejoin.dw, 0); return; } val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending); if (val < 0) { rtnl_unlock(); return; } call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); rtnl_unlock(); if (val) schedule_delayed_work(&team->mcast_rejoin.dw, msecs_to_jiffies(team->mcast_rejoin.interval)); } static void team_mcast_rejoin(struct team *team) { if (!team->mcast_rejoin.count || !netif_running(team->dev)) return; atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending); schedule_delayed_work(&team->mcast_rejoin.dw, 0); } static void team_mcast_rejoin_init(struct team *team) { INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work); } static void team_mcast_rejoin_fini(struct team *team) { cancel_delayed_work_sync(&team->mcast_rejoin.dw); } /************************ * Rx path frame handler ************************/ /* note: already called with rcu_read_lock */ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct team_port *port; struct 
team *team; rx_handler_result_t res; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return RX_HANDLER_CONSUMED; *pskb = skb; port = team_port_get_rcu(skb->dev); team = port->team; if (!team_port_enabled(port)) { /* allow exact match delivery for disabled ports */ res = RX_HANDLER_EXACT; } else { res = team->ops.receive(team, port, skb); } if (res == RX_HANDLER_ANOTHER) { struct team_pcpu_stats *pcpu_stats; pcpu_stats = this_cpu_ptr(team->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->rx_packets++; pcpu_stats->rx_bytes += skb->len; if (skb->pkt_type == PACKET_MULTICAST) pcpu_stats->rx_multicast++; u64_stats_update_end(&pcpu_stats->syncp); skb->dev = team->dev; } else { this_cpu_inc(team->pcpu_stats->rx_dropped); } return res; } /************************************* * Multiqueue Tx port select override *************************************/ static int team_queue_override_init(struct team *team) { struct list_head *listarr; unsigned int queue_cnt = team->dev->num_tx_queues - 1; unsigned int i; if (!queue_cnt) return 0; listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL); if (!listarr) return -ENOMEM; team->qom_lists = listarr; for (i = 0; i < queue_cnt; i++) INIT_LIST_HEAD(listarr++); return 0; } static void team_queue_override_fini(struct team *team) { kfree(team->qom_lists); } static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id) { return &team->qom_lists[queue_id - 1]; } /* * note: already called with rcu_read_lock */ static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb) { struct list_head *qom_list; struct team_port *port; if (!team->queue_override_enabled || !skb->queue_mapping) return false; qom_list = __team_get_qom_list(team, skb->queue_mapping); list_for_each_entry_rcu(port, qom_list, qom_list) { if (!team_dev_queue_xmit(team, port, skb)) return true; } return false; } static void __team_queue_override_port_del(struct team *team, struct team_port *port) { if 
(!port->queue_id) return; list_del_rcu(&port->qom_list); } static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, struct team_port *cur) { if (port->priority < cur->priority) return true; if (port->priority > cur->priority) return false; if (port->index < cur->index) return true; return false; } static void __team_queue_override_port_add(struct team *team, struct team_port *port) { struct team_port *cur; struct list_head *qom_list; struct list_head *node; if (!port->queue_id) return; qom_list = __team_get_qom_list(team, port->queue_id); node = qom_list; list_for_each_entry(cur, qom_list, qom_list) { if (team_queue_override_port_has_gt_prio_than(port, cur)) break; node = &cur->qom_list; } list_add_tail_rcu(&port->qom_list, node); } static void __team_queue_override_enabled_check(struct team *team) { struct team_port *port; bool enabled = false; list_for_each_entry(port, &team->port_list, list) { if (port->queue_id) { enabled = true; break; } } if (enabled == team->queue_override_enabled) return; netdev_dbg(team->dev, "%s queue override\n", enabled ? 
"Enabling" : "Disabling"); team->queue_override_enabled = enabled; } static void team_queue_override_port_prio_changed(struct team *team, struct team_port *port) { if (!port->queue_id || team_port_enabled(port)) return; __team_queue_override_port_del(team, port); __team_queue_override_port_add(team, port); __team_queue_override_enabled_check(team); } static void team_queue_override_port_change_queue_id(struct team *team, struct team_port *port, u16 new_queue_id) { if (team_port_enabled(port)) { __team_queue_override_port_del(team, port); port->queue_id = new_queue_id; __team_queue_override_port_add(team, port); __team_queue_override_enabled_check(team); } else { port->queue_id = new_queue_id; } } static void team_queue_override_port_add(struct team *team, struct team_port *port) { __team_queue_override_port_add(team, port); __team_queue_override_enabled_check(team); } static void team_queue_override_port_del(struct team *team, struct team_port *port) { __team_queue_override_port_del(team, port); __team_queue_override_enabled_check(team); } /**************** * Port handling ****************/ static bool team_port_find(const struct team *team, const struct team_port *port) { struct team_port *cur; list_for_each_entry(cur, &team->port_list, list) if (cur == port) return true; return false; } /* * Enable/disable port by adding to enabled port hashlist and setting * port->index (Might be racy so reader could see incorrect ifindex when * processing a flying packet, but that is not a problem). Write guarded * by team->lock. 
*/ static void team_port_enable(struct team *team, struct team_port *port) { if (team_port_enabled(port)) return; port->index = team->en_port_count++; hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); team_adjust_ops(team); team_queue_override_port_add(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); team_notify_peers(team); team_mcast_rejoin(team); } static void __reconstruct_port_hlist(struct team *team, int rm_index) { int i; struct team_port *port; for (i = rm_index + 1; i < team->en_port_count; i++) { port = team_get_port_by_index(team, i); hlist_del_rcu(&port->hlist); port->index--; hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); } } static void team_port_disable(struct team *team, struct team_port *port) { if (!team_port_enabled(port)) return; if (team->ops.port_disabled) team->ops.port_disabled(team, port); hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; team->en_port_count--; team_queue_override_port_del(team, port); team_adjust_ops(team); team_notify_peers(team); team_mcast_rejoin(team); } #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ NETIF_F_HIGHDMA | NETIF_F_LRO) static void __team_compute_features(struct team *team) { struct team_port *port; u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; list_for_each_entry(port, &team->port_list, list) { vlan_features = netdev_increment_features(vlan_features, port->dev->vlan_features, TEAM_VLAN_FEATURES); dst_release_flag &= port->dev->priv_flags; if (port->dev->hard_header_len > max_hard_header_len) max_hard_header_len = port->dev->hard_header_len; } team->dev->vlan_features = vlan_features; team->dev->hard_header_len = max_hard_header_len; team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; if 
(dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; netdev_change_features(team->dev); } static void team_compute_features(struct team *team) { mutex_lock(&team->lock); __team_compute_features(team); mutex_unlock(&team->lock); } static int team_port_enter(struct team *team, struct team_port *port) { int err = 0; dev_hold(team->dev); if (team->ops.port_enter) { err = team->ops.port_enter(team, port); if (err) { netdev_err(team->dev, "Device %s failed to enter team mode\n", port->dev->name); goto err_port_enter; } } return 0; err_port_enter: dev_put(team->dev); return err; } static void team_port_leave(struct team *team, struct team_port *port) { if (team->ops.port_leave) team->ops.port_leave(team, port); dev_put(team->dev); } #ifdef CONFIG_NET_POLL_CONTROLLER static int team_port_enable_netpoll(struct team *team, struct team_port *port) { struct netpoll *np; int err; if (!team->dev->npinfo) return 0; np = kzalloc(sizeof(*np), GFP_KERNEL); if (!np) return -ENOMEM; err = __netpoll_setup(np, port->dev); if (err) { kfree(np); return err; } port->np = np; return err; } static void team_port_disable_netpoll(struct team_port *port) { struct netpoll *np = port->np; if (!np) return; port->np = NULL; /* Wait for transmitting packets to finish before freeing. 
*/ synchronize_rcu_bh(); __netpoll_cleanup(np); kfree(np); } #else static int team_port_enable_netpoll(struct team *team, struct team_port *port) { return 0; } static void team_port_disable_netpoll(struct team_port *port) { } #endif static int team_upper_dev_link(struct net_device *dev, struct net_device *port_dev) { int err; err = netdev_master_upper_dev_link(port_dev, dev); if (err) return err; port_dev->priv_flags |= IFF_TEAM_PORT; return 0; } static void team_upper_dev_unlink(struct net_device *dev, struct net_device *port_dev) { netdev_upper_dev_unlink(port_dev, dev); port_dev->priv_flags &= ~IFF_TEAM_PORT; } static void __team_port_change_port_added(struct team_port *port, bool linkup); static int team_dev_type_check_change(struct net_device *dev, struct net_device *port_dev); static int team_port_add(struct team *team, struct net_device *port_dev) { struct net_device *dev = team->dev; struct team_port *port; char *portname = port_dev->name; int err; if (port_dev->flags & IFF_LOOPBACK) { netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n", portname); return -EINVAL; } if (team_port_exists(port_dev)) { netdev_err(dev, "Device %s is already a port " "of a team device\n", portname); return -EBUSY; } if (port_dev->features & NETIF_F_VLAN_CHALLENGED && vlan_uses_dev(dev)) { netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n", portname); return -EPERM; } err = team_dev_type_check_change(dev, port_dev); if (err) return err; if (port_dev->flags & IFF_UP) { netdev_err(dev, "Device %s is up. 
Set it down before adding it as a team port\n", portname); return -EBUSY; } port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size, GFP_KERNEL); if (!port) return -ENOMEM; port->dev = port_dev; port->team = team; INIT_LIST_HEAD(&port->qom_list); port->orig.mtu = port_dev->mtu; err = dev_set_mtu(port_dev, dev->mtu); if (err) { netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err); goto err_set_mtu; } memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len); err = team_port_enter(team, port); if (err) { netdev_err(dev, "Device %s failed to enter team mode\n", portname); goto err_port_enter; } err = dev_open(port_dev); if (err) { netdev_dbg(dev, "Device %s opening failed\n", portname); goto err_dev_open; } err = vlan_vids_add_by_dev(port_dev, dev); if (err) { netdev_err(dev, "Failed to add vlan ids to device %s\n", portname); goto err_vids_add; } err = team_port_enable_netpoll(team, port); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); goto err_enable_netpoll; } if (!(dev->features & NETIF_F_LRO)) dev_disable_lro(port_dev); err = netdev_rx_handler_register(port_dev, team_handle_frame, port); if (err) { netdev_err(dev, "Device %s failed to register rx_handler\n", portname); goto err_handler_register; } err = team_upper_dev_link(dev, port_dev); if (err) { netdev_err(dev, "Device %s failed to set upper link\n", portname); goto err_set_upper_link; } err = __team_option_inst_add_port(team, port); if (err) { netdev_err(dev, "Device %s failed to add per-port options\n", portname); goto err_option_port_add; } port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); __team_compute_features(team); __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); __team_options_change_check(team); netdev_info(dev, "Port device %s added\n", portname); return 0; err_option_port_add: team_upper_dev_unlink(dev, port_dev); err_set_upper_link: netdev_rx_handler_unregister(port_dev); 
err_handler_register: team_port_disable_netpoll(port); err_enable_netpoll: vlan_vids_del_by_dev(port_dev, dev); err_vids_add: dev_close(port_dev); err_dev_open: team_port_leave(team, port); team_port_set_orig_dev_addr(port); err_port_enter: dev_set_mtu(port_dev, port->orig.mtu); err_set_mtu: kfree(port); return err; } static void __team_port_change_port_removed(struct team_port *port); static int team_port_del(struct team *team, struct net_device *port_dev) { struct net_device *dev = team->dev; struct team_port *port; char *portname = port_dev->name; port = team_port_get_rtnl(port_dev); if (!port || !team_port_find(team, port)) { netdev_err(dev, "Device %s does not act as a port of this team\n", portname); return -ENOENT; } team_port_disable(team, port); list_del_rcu(&port->list); team_upper_dev_unlink(dev, port_dev); netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); dev_uc_unsync(port_dev, dev); dev_mc_unsync(port_dev, dev); dev_close(port_dev); team_port_leave(team, port); __team_option_inst_mark_removed_port(team, port); __team_options_change_check(team); __team_option_inst_del_port(team, port); __team_port_change_port_removed(port); team_port_set_orig_dev_addr(port); dev_set_mtu(port_dev, port->orig.mtu); kfree_rcu(port, rcu); netdev_info(dev, "Port device %s removed\n", portname); __team_compute_features(team); return 0; } /***************** * Net device ops *****************/ static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx) { ctx->data.str_val = team->mode->kind; return 0; } static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx) { return team_change_mode(team, ctx->data.str_val); } static int team_notify_peers_count_get(struct team *team, struct team_gsetter_ctx *ctx) { ctx->data.u32_val = team->notify_peers.count; return 0; } static int team_notify_peers_count_set(struct team *team, struct team_gsetter_ctx *ctx) { team->notify_peers.count = 
ctx->data.u32_val; return 0; }

/*
 * Getter/setter pairs backing the team_options[] table below.  Each one
 * copies a value between the generic gsetter context and team state.
 * All run under team->lock taken by the option infrastructure.
 */
static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

/* Per-port "enabled" option. */
static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

/* Per-port "user_linkup" option: user-space supplied link state. */
static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	/* Recompute the port's effective linkup and the master's carrier. */
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

/* Per-port "priority" option, used by the queue override machinery. */
static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	/* Refuse queue ids beyond what the master device actually has. */
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

/* Options exported through the team generic netlink interface. */
static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};

/*
 * Private lockdep classes so that locks of team devices stacked over
 * other devices do not trigger false positive lock-order warnings.
 */
static struct lock_class_key team_netdev_xmit_lock_key;
static struct lock_class_key team_netdev_addr_lock_key;
static struct lock_class_key team_tx_busylock_key;

static void team_set_lockdep_class_one(struct net_device *dev,
				       struct netdev_queue *txq,
				       void *unused)
{
	lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
}

static void team_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &team_tx_busylock_key;
}

/* ndo_init: allocate and initialize per-device team state. */
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	team_set_lockdep_class(dev);

	return 0;

	/* Unwind in reverse order of initialization. */
err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}

/* ndo_uninit: remove all ports and tear down state (rtnl held). */
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	/* Queue override gets first shot; fall back to the active mode. */
	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/*
	 * This helper function exists to help dev_pick_tx
	 * get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

/* Propagate promisc/allmulti refcount changes to every port device. */
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

/* Mirror the master's uc/mc address lists onto every port device. */
static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock.
	 * It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	/* Allow the NETDEV_PRECHANGEMTU notifier to pass for our ports. */
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	/* Roll back the ports already switched to the new MTU. */
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}

static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	return stats;
}

static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock.
	 * It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npifo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = team_port_enable_netpoll(team, port);
		if (err) {
			/* Undo the ports already set up. */
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static
netdev_features_t team_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	/* Intersect features over all ports (RCU walk, no team lock). */
	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	/* From now on carrier is user-controlled; __team_carrier_check
	 * will not override it.
	 */
	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_features_check	= passthru_features_check,
};

/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};

/***********************
 * rt netlink interface
 ***********************/

/* Clone link-layer parameters of the first enslaved port. */
static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops = port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}

/* If the team is empty, adopt the new port's device type; otherwise
 * refuse ports of a different type.
 */
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->destructor = team_destructor;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
	dev->features |= dev->hw_features;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};

/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC] = {
		.type = NLA_UNSPEC,
	},
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	/* Only accept devices that are actually team devices. */
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team,
				u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}

/* Emit one nested TEAM_ATTR_ITEM_OPTION attribute for opt_inst. */
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		/* Flag attribute present <=> true. */
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}

/* Send out *pskb (if any) via send_func and allocate a fresh skb. */
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team,
portid); if (err) return err; } *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!*pskb) return -ENOMEM; return 0; } static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq, int flags, team_nl_send_func_t *send_func, struct list_head *sel_opt_inst_list) { struct nlattr *option_list; struct nlmsghdr *nlh; void *hdr; struct team_option_inst *opt_inst; int err; struct sk_buff *skb = NULL; bool incomplete; int i; opt_inst = list_first_entry(sel_opt_inst_list, struct team_option_inst, tmp_list); start_again: err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) return err; hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, TEAM_CMD_OPTIONS_GET); if (!hdr) return -EMSGSIZE; if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); if (!option_list) goto nla_put_failure; i = 0; incomplete = false; list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) { err = team_nl_fill_one_option_get(skb, team, opt_inst); if (err) { if (err == -EMSGSIZE) { if (!i) goto errout; incomplete = true; break; } goto errout; } i++; } nla_nest_end(skb, option_list); genlmsg_end(skb, hdr); if (incomplete) goto start_again; send_done: nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) goto errout; goto send_done; } return send_func(skb, team, portid); nla_put_failure: err = -EMSGSIZE; errout: genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) { struct team *team; struct team_option_inst *opt_inst; int err; LIST_HEAD(sel_opt_inst_list); team = team_nl_team_get(info); if (!team) return -EINVAL; list_for_each_entry(opt_inst, &team->option_inst_list, list) list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); err = team_nl_send_options_get(team, 
				       info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);

/* TEAM_CMD_OPTIONS_SET handler: parse nested option attributes, apply
 * each one to every matching option instance and broadcast the changes.
 */
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;
	LIST_HEAD(opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = -EINVAL;	/* NOTE(review): redundant, re-set just below */
	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		/* Map the netlink attribute type to the team option type. */
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		/* Bool options carry their value by attribute presence. */
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		/* Find every instance matching name/type/port/array index. */
		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ?
true : false; break; case TEAM_OPTION_TYPE_S32: ctx.data.s32_val = nla_get_s32(attr_data); break; default: BUG(); } err = team_option_set(team, opt_inst, &ctx); if (err) goto team_put; opt_inst->changed = true; list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { err = -ENOENT; goto team_put; } } err = team_nl_send_event_options_get(team, &opt_inst_list); team_put: team_nl_team_put(team); return err; } static int team_nl_fill_one_port_get(struct sk_buff *skb, struct team_port *port) { struct nlattr *port_item; port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); if (!port_item) goto nest_cancel; if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex)) goto nest_cancel; if (port->changed) { if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED)) goto nest_cancel; port->changed = false; } if ((port->removed && nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) || (port->state.linkup && nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) || nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) || nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex)) goto nest_cancel; nla_nest_end(skb, port_item); return 0; nest_cancel: nla_nest_cancel(skb, port_item); return -EMSGSIZE; } static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq, int flags, team_nl_send_func_t *send_func, struct team_port *one_port) { struct nlattr *port_list; struct nlmsghdr *nlh; void *hdr; struct team_port *port; int err; struct sk_buff *skb = NULL; bool incomplete; int i; port = list_first_entry_or_null(&team->port_list, struct team_port, list); start_again: err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) return err; hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, TEAM_CMD_PORT_LIST_GET); if (!hdr) return -EMSGSIZE; if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); if (!port_list) goto nla_put_failure; i = 0; incomplete = false; /* If 
one port is selected, called wants to send port list containing * only this port. Otherwise go through all listed ports and send all */ if (one_port) { err = team_nl_fill_one_port_get(skb, one_port); if (err) goto errout; } else if (port) { list_for_each_entry_from(port, &team->port_list, list) { err = team_nl_fill_one_port_get(skb, port); if (err) { if (err == -EMSGSIZE) { if (!i) goto errout; incomplete = true; break; } goto errout; } i++; } } nla_nest_end(skb, port_list); genlmsg_end(skb, hdr); if (incomplete) goto start_again; send_done: nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) goto errout; goto send_done; } return send_func(skb, team, portid); nla_put_failure: err = -EMSGSIZE; errout: genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } static int team_nl_cmd_port_list_get(struct sk_buff *skb, struct genl_info *info) { struct team *team; int err; team = team_nl_team_get(info); if (!team) return -EINVAL; err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq, NLM_F_ACK, team_nl_send_unicast, NULL); team_nl_team_put(team); return err; } static const struct genl_ops team_nl_ops[] = { { .cmd = TEAM_CMD_NOOP, .doit = team_nl_cmd_noop, .policy = team_nl_policy, }, { .cmd = TEAM_CMD_OPTIONS_SET, .doit = team_nl_cmd_options_set, .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = TEAM_CMD_OPTIONS_GET, .doit = team_nl_cmd_options_get, .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, { .cmd = TEAM_CMD_PORT_LIST_GET, .doit = team_nl_cmd_port_list_get, .policy = team_nl_policy, .flags = GENL_ADMIN_PERM, }, }; static const struct genl_multicast_group team_nl_mcgrps[] = { { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }, }; static int team_nl_send_multicast(struct sk_buff *skb, struct team *team, u32 portid) { return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev), skb, 0, 0, GFP_KERNEL); } static int 
team_nl_send_event_options_get(struct team *team,
			       struct list_head *sel_opt_inst_list)
{
	/* Unsolicited notification: portid/seq/flags are all zero and the
	 * message is multicast to the change-event group. */
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

/* Multicast the state of a single port to the change-event group. */
static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0,
					  team_nl_send_multicast, port);
}

/* Register the team generic netlink family with its ops and mcast groups. */
static int team_nl_init(void)
{
	return genl_register_family_with_ops_groups(&team_nl_family,
						    team_nl_ops,
						    team_nl_mcgrps);
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/

/* Collect every option instance flagged as changed and announce them in a
 * single netlink event; -ESRCH (no listeners) is deliberately not a warning.
 */
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list,
				      &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}

/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_cmd ecmd;

		/* Link came up: snapshot the slave's speed/duplex. */
		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ethtool_cmd_speed(&ecmd);
			port->state.duplex = ecmd.duplex;
			goto send_event;
		}
	}
	/* Link down or ethtool query failed: report unknown speed/duplex. */
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}

/* Set the master's carrier on iff at least one port has link; skipped
 * entirely when userspace manages the carrier directly.
 */
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}

/* Send a port event only when the link state actually changed. */
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

/* Newly added ports always announce their initial state. */
static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

/* Removed ports are announced as link-down with the removed flag set. */
static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

/* Locked wrapper around __team_port_change_check(). */
static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}


/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	/* Only react to devices currently enslaved to a team. */
	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid to change mtu of underlaying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid to change type of underlaying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
.notifier_call = team_device_event, }; /*********************** * Module init and exit ***********************/ static int __init team_module_init(void) { int err; register_netdevice_notifier(&team_notifier_block); err = rtnl_link_register(&team_link_ops); if (err) goto err_rtnl_reg; err = team_nl_init(); if (err) goto err_nl_init; return 0; err_nl_init: rtnl_link_unregister(&team_link_ops); err_rtnl_reg: unregister_netdevice_notifier(&team_notifier_block); return err; } static void __exit team_module_exit(void) { team_nl_fini(); rtnl_link_unregister(&team_link_ops); unregister_netdevice_notifier(&team_notifier_block); } module_init(team_module_init); module_exit(team_module_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>"); MODULE_DESCRIPTION("Ethernet team device driver"); MODULE_ALIAS_RTNL_LINK(DRV_NAME);
gpl-2.0
AndroidGX/SimpleGX-L-5.0.2_BOE2_G901F
drivers/sensorhub/stm/factory/gyro_mpu6500.c
532
19120
/* * Copyright (C) 2012, Samsung Electronics Co. Ltd. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include "../ssp.h" /*************************************************************************/ /* factory Sysfs */ /*************************************************************************/ #define VENDOR "INVENSENSE" #define CHIP_ID "MPU6500" #define CALIBRATION_FILE_PATH "/efs/gyro_cal_data" #define VERBOSE_OUT 1 #define CALIBRATION_DATA_AMOUNT 20 #define DEF_GYRO_FULLSCALE 2000 #define DEF_GYRO_SENS (32768 / DEF_GYRO_FULLSCALE) #define DEF_BIAS_LSB_THRESH_SELF (20 * DEF_GYRO_SENS) #define DEF_BIAS_LSB_THRESH_SELF_6500 (30 * DEF_GYRO_SENS) #define DEF_RMS_LSB_TH_SELF (5 * DEF_GYRO_SENS) #define DEF_RMS_THRESH ((DEF_RMS_LSB_TH_SELF) * (DEF_RMS_LSB_TH_SELF)) #define DEF_SCALE_FOR_FLOAT (1000) #define DEF_RMS_SCALE_FOR_RMS (10000) #define DEF_SQRT_SCALE_FOR_RMS (100) static ssize_t gyro_vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", VENDOR); } static ssize_t gyro_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", CHIP_ID); } int gyro_open_calibration(struct ssp_data *data) { int iRet = 0; mm_segment_t old_fs; struct file *cal_filp = NULL; old_fs = get_fs(); set_fs(KERNEL_DS); cal_filp = filp_open(CALIBRATION_FILE_PATH, O_RDONLY, 0666); if (IS_ERR(cal_filp)) { set_fs(old_fs); iRet = PTR_ERR(cal_filp); data->gyrocal.x = 0; data->gyrocal.y = 0; data->gyrocal.z = 0; return 
iRet; } iRet = cal_filp->f_op->read(cal_filp, (char *)&data->gyrocal, 3 * sizeof(int), &cal_filp->f_pos); if (iRet != 3 * sizeof(int)) iRet = -EIO; filp_close(cal_filp, current->files); set_fs(old_fs); ssp_dbg("[SSP]: open gyro calibration %d, %d, %d\n", data->gyrocal.x, data->gyrocal.y, data->gyrocal.z); return iRet; } static int save_gyro_caldata(struct ssp_data *data, s16 *iCalData) { int iRet = 0; struct file *cal_filp = NULL; mm_segment_t old_fs; data->gyrocal.x = iCalData[0] << 2; data->gyrocal.y = iCalData[1] << 2; data->gyrocal.z = iCalData[2] << 2; ssp_dbg("[SSP]: do gyro calibrate %d, %d, %d\n", data->gyrocal.x, data->gyrocal.y, data->gyrocal.z); old_fs = get_fs(); set_fs(KERNEL_DS); cal_filp = filp_open(CALIBRATION_FILE_PATH, O_CREAT | O_TRUNC | O_WRONLY, 0666); if (IS_ERR(cal_filp)) { pr_err("[SSP]: %s - Can't open calibration file\n", __func__); set_fs(old_fs); iRet = PTR_ERR(cal_filp); return -EIO; } iRet = cal_filp->f_op->write(cal_filp, (char *)&data->gyrocal, 3 * sizeof(int), &cal_filp->f_pos); if (iRet != 3 * sizeof(int)) { pr_err("[SSP]: %s - Can't write gyro cal to file\n", __func__); iRet = -EIO; } filp_close(cal_filp, current->files); set_fs(old_fs); return iRet; } int set_gyro_cal(struct ssp_data *data) { int iRet = 0; struct ssp_msg *msg; s16 gyro_cal[3]; if (!(data->uSensorState & (1 << GYROSCOPE_SENSOR))) { pr_info("[SSP]: %s - Skip this function!!!"\ ", gyro sensor is not connected(0x%x)\n", __func__, data->uSensorState); return iRet; } gyro_cal[0] = data->gyrocal.x; gyro_cal[1] = data->gyrocal.y; gyro_cal[2] = data->gyrocal.z; msg = kzalloc(sizeof(*msg), GFP_KERNEL); msg->cmd = MSG2SSP_AP_MCU_SET_GYRO_CAL; msg->length = 6; msg->options = AP2HUB_WRITE; msg->buffer = (char*) kzalloc(6, GFP_KERNEL); msg->free_buffer = 1; memcpy(msg->buffer, gyro_cal, 6); iRet = ssp_spi_async(data, msg); if (iRet != SUCCESS) { pr_err("[SSP]: %s - i2c fail %d\n", __func__, iRet); iRet = ERROR; } pr_info("[SSP] Set gyro cal data %d, %d, %d\n", gyro_cal[0], 
		gyro_cal[1], gyro_cal[2]);
	return iRet;
}

/* sysfs "power_off" show: the hub owns power, so always report success. */
static ssize_t gyro_power_off(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssp_dbg("[SSP]: %s\n", __func__);

	return sprintf(buf, "%d\n", 1);
}

/* sysfs "power_on" show: likewise a fixed success indicator. */
static ssize_t gyro_power_on(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssp_dbg("[SSP]: %s\n", __func__);

	return sprintf(buf, "%d\n", 1);
}

/* Read the MPU6500 die temperature via the sensor hub.
 * Returns 0 on transport timeout (indistinguishable from a real 0 reading).
 * NOTE(review): kzalloc() result is used unchecked, and msg is not freed
 * here - presumably ssp_spi_sync() owns/frees it; confirm in the ssp
 * transport code.
 */
short mpu6500_gyro_get_temp(struct ssp_data *data)
{
	char chTempBuf[2] = { 0};
	unsigned char reg[2];
	short temperature = 0;
	int iRet = 0;

	struct ssp_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	msg->cmd = GYROSCOPE_TEMP_FACTORY;
	msg->length = 2;
	msg->options = AP2HUB_READ;
	msg->buffer = chTempBuf;
	msg->free_buffer = 0;

	iRet = ssp_spi_sync(data, msg, 3000);
	if (iRet != SUCCESS) {
		pr_err("[SSP]: %s - Gyro Temp Timeout!!\n", __func__);
		goto exit;
	}

	/* Assemble 16-bit value with chTempBuf[1] as the high byte. */
	reg[0] = chTempBuf[1];
	reg[1] = chTempBuf[0];
	temperature = (short) (((reg[0]) << 8) | reg[1]);
	ssp_dbg("[SSP]: %s - %d\n", __func__, temperature);

exit:
	return temperature;
}

/* K330 variant: single-byte temperature read; returns 0 when the gyro is
 * not connected or the transport times out.
 */
char k330_gyro_get_temp(struct ssp_data *data)
{
	char chTemp = 0;
	int iRet = 0;
	struct ssp_msg *msg;

	/* Skip entirely when the hub never reported a connected gyro. */
	if (!(data->uSensorState & (1 << GYROSCOPE_SENSOR)))
		goto exit;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	msg->cmd = GYROSCOPE_TEMP_FACTORY;
	msg->length = 1;
	msg->options = AP2HUB_READ;
	msg->buffer = &chTemp;
	msg->free_buffer = 0;

	iRet = ssp_spi_sync(data, msg, 3000);
	if (iRet != SUCCESS) {
		pr_err("[SSP]: %s - Gyro Temp Timeout!!\n", __func__);
		goto exit;
	}

	ssp_dbg("[SSP]: %s - %d\n", __func__, chTemp);
exit:
	return chTemp;
}

/* sysfs "temperature" show: delegates to the MPU6500 path. */
static ssize_t gyro_get_temp(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	short temperature = 0;
	struct ssp_data *data = dev_get_drvdata(dev);

	temperature = mpu6500_gyro_get_temp(data);
	return sprintf(buf, "%d\n", temperature);
}

/* Digit-by-digit integer square root over base-100 digit groups of sqsum;
 * used to turn the self-test RMS sum-of-squares into a scaled RMS value.
 */
u32 mpu6050_selftest_sqrt(u32 sqsum)
{
	u32 sq_rt;
	u32 g0, g1, g2, g3, g4;
	u32 seed;
	u32 next;
	u32 step;

	/* Split sqsum into base-100 groups g4 (most significant) .. g0. */
	g4 = sqsum / 100000000;
	g3 = (sqsum - g4 * 100000000) / 1000000;
	g2 = (sqsum - g4 * 100000000 - g3 * 1000000) / 10000;
	g1 =
(sqsum - g4 * 100000000 - g3 * 1000000 - g2 * 10000) / 100; g0 = (sqsum - g4 * 100000000 - g3 * 1000000 - g2 * 10000 - g1 * 100); next = g4; step = 0; seed = 0; while (((seed + 1) * (step + 1)) <= next) { step++; seed++; } sq_rt = seed * 10000; next = (next - (seed * step)) * 100 + g3; step = 0; seed = 2 * seed * 10; while (((seed + 1) * (step + 1)) <= next) { step++; seed++; } sq_rt = sq_rt + step * 1000; next = (next - seed * step) * 100 + g2; seed = (seed + step) * 10; step = 0; while (((seed + 1) * (step + 1)) <= next) { step++; seed++; } sq_rt = sq_rt + step * 100; next = (next - seed * step) * 100 + g1; seed = (seed + step) * 10; step = 0; while (((seed + 1) * (step + 1)) <= next) { step++; seed++; } sq_rt = sq_rt + step * 10; next = (next - seed * step) * 100 + g0; seed = (seed + step) * 10; step = 0; while (((seed + 1) * (step + 1)) <= next) { step++; seed++; } sq_rt = sq_rt + step; return sq_rt; } ssize_t k330_gyro_selftest(char *buf, struct ssp_data *data) { char chTempBuf[36] = { 0,}; u8 uFifoPass = 2; u8 uBypassPass = 2; u8 uCalPass = 0; u8 dummy[2] = {0,}; s16 iNOST[3] = {0,}, iST[3] = {0,}, iCalData[3] = {0,}; s16 iZeroRateData[3] = {0,}, fifo_data[4] = {0,}; int iRet = 0; struct ssp_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); msg->cmd = GYROSCOPE_FACTORY; msg->length = 36; msg->options = AP2HUB_READ; msg->buffer = chTempBuf; msg->free_buffer = 0; iRet = ssp_spi_sync(data, msg, 5000); if (iRet != SUCCESS) { pr_err("[SSP]: %s - Gyro Selftest Timeout!!\n", __func__); goto exit; } data->uTimeOutCnt = 0; iNOST[0] = (s16)((chTempBuf[0] << 8) + chTempBuf[1]); iNOST[1] = (s16)((chTempBuf[2] << 8) + chTempBuf[3]); iNOST[2] = (s16)((chTempBuf[4] << 8) + chTempBuf[5]); iST[0] = (s16)((chTempBuf[6] << 8) + chTempBuf[7]); iST[1] = (s16)((chTempBuf[8] << 8) + chTempBuf[9]); iST[2] = (s16)((chTempBuf[10] << 8) + chTempBuf[11]); iCalData[0] = (s16)((chTempBuf[12] << 8) + chTempBuf[13]); iCalData[1] =( s16)((chTempBuf[14] << 8) + chTempBuf[15]); iCalData[2] = 
(s16)((chTempBuf[16] << 8) + chTempBuf[17]); iZeroRateData[0] = (s16)((chTempBuf[18] << 8) + chTempBuf[19]); iZeroRateData[1] = (s16)((chTempBuf[20] << 8) + chTempBuf[21]); iZeroRateData[2] = (s16)((chTempBuf[22] << 8) + chTempBuf[23]); fifo_data[0] = chTempBuf[24]; fifo_data[1] = (s16)((chTempBuf[25] << 8) + chTempBuf[26]); fifo_data[2] = (s16)((chTempBuf[27] << 8) + chTempBuf[28]); fifo_data[3] = (s16)((chTempBuf[29] << 8) + chTempBuf[30]); uCalPass = chTempBuf[31]; uFifoPass = chTempBuf[32]; uBypassPass = chTempBuf[33]; dummy[0] = chTempBuf[34]; dummy[1] = chTempBuf[35]; pr_info("[SSP] %s dummy = 0x%X, 0x%X\n", __func__, dummy[0], dummy[1]); if (uFifoPass && uBypassPass && uCalPass) save_gyro_caldata(data, iCalData); ssp_dbg("[SSP]: %s - %d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", __func__, iNOST[0], iNOST[1], iNOST[2], iST[0], iST[1], iST[2], iZeroRateData[0], iZeroRateData[1], iZeroRateData[2], fifo_data[0], fifo_data[1], fifo_data[2], fifo_data[3], uFifoPass & uBypassPass & uCalPass, uFifoPass, uCalPass); exit: return sprintf(buf, "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n", iNOST[0], iNOST[1], iNOST[2], iST[0], iST[1], iST[2], iZeroRateData[0], iZeroRateData[1], iZeroRateData[2], fifo_data[0], fifo_data[1], fifo_data[2], fifo_data[3], uFifoPass & uBypassPass & uCalPass, uFifoPass, uCalPass); } ssize_t mpu6500_gyro_selftest(char *buf, struct ssp_data *data) { char chTempBuf[36] = { 0,}; u8 initialized = 0; s8 hw_result = 0; int i = 0, j = 0, total_count = 0, ret_val = 0; long avg[3] = {0,}, rms[3] = {0,}; int gyro_bias[3] = {0,}, gyro_rms[3] = {0,}; s16 shift_ratio[3] = {0,}; s16 iCalData[3] = {0,}; char a_name[3][2] = { "X", "Y", "Z" }; int iRet = 0; int dps_rms[3] = { 0, }; u32 temp = 0; int bias_thresh = DEF_BIAS_LSB_THRESH_SELF_6500; struct ssp_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL); msg->cmd = GYROSCOPE_FACTORY; msg->length = 36; msg->options = AP2HUB_READ; msg->buffer = chTempBuf; msg->free_buffer = 0; iRet = ssp_spi_sync(data, 
msg, 7000); if (iRet != SUCCESS) { pr_err("[SSP]: %s - Gyro Selftest Timeout!!\n", __func__); ret_val = 1; goto exit; } data->uTimeOutCnt = 0; pr_err("[SSP]%d %d %d %d %d %d %d %d %d %d %d %d", chTempBuf[0], chTempBuf[1], chTempBuf[2], chTempBuf[3], chTempBuf[4], chTempBuf[5], chTempBuf[6], chTempBuf[7], chTempBuf[8], chTempBuf[9], chTempBuf[10], chTempBuf[11]); initialized = chTempBuf[0]; shift_ratio[0] = (s16)((chTempBuf[2] << 8) + chTempBuf[1]); shift_ratio[1] = (s16)((chTempBuf[4] << 8) + chTempBuf[3]); shift_ratio[2] = (s16)((chTempBuf[6] << 8) + chTempBuf[5]); hw_result = (s8)chTempBuf[7]; total_count = (int)((chTempBuf[11] << 24) + (chTempBuf[10] << 16) + (chTempBuf[9] << 8) + chTempBuf[8]); avg[0] = (long)((chTempBuf[15] << 24) + (chTempBuf[14] << 16) + (chTempBuf[13] << 8) + chTempBuf[12]); avg[1] = (long)((chTempBuf[19] << 24) + (chTempBuf[18] << 16) + (chTempBuf[17] << 8) + chTempBuf[16]); avg[2] = (long)((chTempBuf[23] << 24) + (chTempBuf[22] << 16) + (chTempBuf[21] << 8) + chTempBuf[20]); rms[0] = (long)((chTempBuf[27] << 24) + (chTempBuf[26] << 16) + (chTempBuf[25] << 8) + chTempBuf[24]); rms[1] = (long)((chTempBuf[31] << 24) + (chTempBuf[30] << 16) + (chTempBuf[29] << 8) + chTempBuf[28]); rms[2] = (long)((chTempBuf[35] << 24) + (chTempBuf[34] << 16) + (chTempBuf[33] << 8) + chTempBuf[32]); pr_info("[SSP] init: %d, total cnt: %d\n", initialized, total_count); pr_info("[SSP] hw_result: %d, %d, %d, %d\n", hw_result, shift_ratio[0], shift_ratio[1], shift_ratio[2]); pr_info("[SSP] avg %+8ld %+8ld %+8ld (LSB)\n", avg[0], avg[1], avg[2]); pr_info("[SSP] rms %+8ld %+8ld %+8ld (LSB)\n", rms[0], rms[1], rms[2]); if (total_count == 0) { pr_err("[SSP] %s, total_count is 0. 
goto exit\n", __func__); ret_val = 2; goto exit; } if (hw_result < 0) { pr_err("[SSP] %s - hw selftest fail(%d), sw selftest skip\n", __func__, hw_result); return sprintf(buf, "-1,0,0,0,0,0,0,%d.%d,%d.%d,%d.%d,0,0,0\n", shift_ratio[0] / 10, shift_ratio[0] % 10, shift_ratio[1] / 10, shift_ratio[1] % 10, shift_ratio[2] / 10, shift_ratio[2] % 10); } gyro_bias[0] = (avg[0] * DEF_SCALE_FOR_FLOAT) / DEF_GYRO_SENS; gyro_bias[1] = (avg[1] * DEF_SCALE_FOR_FLOAT) / DEF_GYRO_SENS; gyro_bias[2] = (avg[2] * DEF_SCALE_FOR_FLOAT) / DEF_GYRO_SENS; iCalData[0] = (s16)avg[0]; iCalData[1] = (s16)avg[1]; iCalData[2] = (s16)avg[2]; if (VERBOSE_OUT) { pr_info("[SSP] abs bias : %+8d.%03d %+8d.%03d %+8d.%03d (dps)\n", (int)abs(gyro_bias[0]) / DEF_SCALE_FOR_FLOAT, (int)abs(gyro_bias[0]) % DEF_SCALE_FOR_FLOAT, (int)abs(gyro_bias[1]) / DEF_SCALE_FOR_FLOAT, (int)abs(gyro_bias[1]) % DEF_SCALE_FOR_FLOAT, (int)abs(gyro_bias[2]) / DEF_SCALE_FOR_FLOAT, (int)abs(gyro_bias[2]) % DEF_SCALE_FOR_FLOAT); } for (j = 0; j < 3; j++) { if (unlikely(abs(avg[j]) > bias_thresh)) { pr_err("[SSP] %s-Gyro bias (%ld) exceeded threshold " "(threshold = %d LSB)\n", a_name[j], avg[j], bias_thresh); ret_val |= 1 << (3 + j); } } /* 3rd, check RMS for dead gyros If any of the RMS noise value returns zero, then we might have dead gyro or FIFO/register failure, the part is sleeping, or the part is not responsive */ if (rms[0] == 0 || rms[1] == 0 || rms[2] == 0) ret_val |= 1 << 6; if (VERBOSE_OUT) { pr_info("[SSP] RMS ^ 2 : %+8ld %+8ld %+8ld\n", (long)rms[0] / total_count, (long)rms[1] / total_count, (long)rms[2] / total_count); } for (j = 0; j < 3; j++) { if (unlikely(rms[j] / total_count > DEF_RMS_THRESH)) { pr_err("[SSP] %s-Gyro rms (%ld) exceeded threshold " "(threshold = %d LSB)\n", a_name[j], rms[j] / total_count, DEF_RMS_THRESH); ret_val |= 1 << (7 + j); } } for (i = 0; i < 3; i++) { if (rms[i] > 10000) { temp = ((u32) (rms[i] / total_count)) * DEF_RMS_SCALE_FOR_RMS; } else { temp = ((u32) (rms[i] * 
DEF_RMS_SCALE_FOR_RMS)) / total_count; } if (rms[i] < 0) temp = 1 << 31; dps_rms[i] = mpu6050_selftest_sqrt(temp) / DEF_GYRO_SENS; gyro_rms[i] = dps_rms[i] * DEF_SCALE_FOR_FLOAT / DEF_SQRT_SCALE_FOR_RMS; } pr_info("[SSP] RMS : %+8d.%03d %+8d.%03d %+8d.%03d (dps)\n", (int)abs(gyro_rms[0]) / DEF_SCALE_FOR_FLOAT, (int)abs(gyro_rms[0]) % DEF_SCALE_FOR_FLOAT, (int)abs(gyro_rms[1]) / DEF_SCALE_FOR_FLOAT, (int)abs(gyro_rms[1]) % DEF_SCALE_FOR_FLOAT, (int)abs(gyro_rms[2]) / DEF_SCALE_FOR_FLOAT, (int)abs(gyro_rms[2]) % DEF_SCALE_FOR_FLOAT); if (likely(!ret_val)) { save_gyro_caldata(data, iCalData); } else { pr_err("[SSP] ret_val != 0, gyrocal is 0 at all axis\n"); data->gyrocal.x = 0; data->gyrocal.y = 0; data->gyrocal.z = 0; } exit: ssp_dbg("[SSP]: %s - %d," "%d.%03d,%d.%03d,%d.%03d," "%d.%03d,%d.%03d,%d.%03d," "%d.%d,%d.%d,%d.%d," "%d,%d,%d\n", __func__, ret_val, (int)abs(gyro_bias[0]/1000), (int)abs(gyro_bias[0])%1000, (int)abs(gyro_bias[1]/1000), (int)abs(gyro_bias[1])%1000, (int)abs(gyro_bias[2]/1000), (int)abs(gyro_bias[2])%1000, gyro_rms[0]/1000, (int)abs(gyro_rms[0])%1000, gyro_rms[1]/1000, (int)abs(gyro_rms[1])%1000, gyro_rms[2]/1000, (int)abs(gyro_rms[2])%1000, shift_ratio[0] / 10, shift_ratio[0] % 10, shift_ratio[1] / 10, shift_ratio[1] % 10, shift_ratio[2] / 10, shift_ratio[2] % 10, (int)(total_count/3), (int)(total_count/3), (int)(total_count/3)); return sprintf(buf, "%d," "%d.%03d,%d.%03d,%d.%03d," "%d.%03d,%d.%03d,%d.%03d," "%d.%d,%d.%d,%d.%d," "%d,%d,%d\n", ret_val, (int)abs(gyro_bias[0]/1000), (int)abs(gyro_bias[0])%1000, (int)abs(gyro_bias[1]/1000), (int)abs(gyro_bias[1])%1000, (int)abs(gyro_bias[2]/1000), (int)abs(gyro_bias[2])%1000, gyro_rms[0]/1000, (int)abs(gyro_rms[0])%1000, gyro_rms[1]/1000, (int)abs(gyro_rms[1])%1000, gyro_rms[2]/1000, (int)abs(gyro_rms[2])%1000, shift_ratio[0] / 10, shift_ratio[0] % 10, shift_ratio[1] / 10, shift_ratio[1] % 10, shift_ratio[2] / 10, shift_ratio[2] % 10, (int)(total_count/3), (int)(total_count/3), 
(int)(total_count/3)); } static ssize_t gyro_selftest_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ssp_data *data = dev_get_drvdata(dev); return mpu6500_gyro_selftest(buf, data); } static ssize_t gyro_selftest_dps_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int iNewDps = 0; int iRet = 0; char chTempBuf = 0; struct ssp_data *data = dev_get_drvdata(dev); struct ssp_msg *msg; if (!(data->uSensorState & (1 << GYROSCOPE_SENSOR))) goto exit; msg = kzalloc(sizeof(*msg), GFP_KERNEL); msg->cmd = GYROSCOPE_DPS_FACTORY; msg->length = 1; msg->options = AP2HUB_READ; msg->buffer = &chTempBuf; msg->free_buffer = 0; sscanf(buf, "%d", &iNewDps); if (iNewDps == GYROSCOPE_DPS250) msg->options |= 0 << SSP_GYRO_DPS; else if (iNewDps == GYROSCOPE_DPS500) msg->options |= 1 << SSP_GYRO_DPS; else if (iNewDps == GYROSCOPE_DPS2000) msg->options |= 2 << SSP_GYRO_DPS; else { msg->options |= 1 << SSP_GYRO_DPS; iNewDps = GYROSCOPE_DPS500; } iRet = ssp_spi_sync(data, msg, 3000); if (iRet != SUCCESS) { pr_err("[SSP]: %s - Gyro Selftest DPS Timeout!!\n", __func__); goto exit; } if (chTempBuf != SUCCESS) { pr_err("[SSP]: %s - Gyro Selftest DPS Error!!\n", __func__); goto exit; } data->uGyroDps = (unsigned int)iNewDps; pr_err("[SSP]: %s - %u dps stored\n", __func__, data->uGyroDps); exit: return count; } static ssize_t gyro_selftest_dps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ssp_data *data = dev_get_drvdata(dev); return sprintf(buf, "%u\n", data->uGyroDps); } static DEVICE_ATTR(name, S_IRUGO, gyro_name_show, NULL); static DEVICE_ATTR(vendor, S_IRUGO, gyro_vendor_show, NULL); static DEVICE_ATTR(power_off, S_IRUGO, gyro_power_off, NULL); static DEVICE_ATTR(power_on, S_IRUGO, gyro_power_on, NULL); static DEVICE_ATTR(temperature, S_IRUGO, gyro_get_temp, NULL); static DEVICE_ATTR(selftest, S_IRUGO, gyro_selftest_show, NULL); static DEVICE_ATTR(selftest_dps, S_IRUGO | S_IWUSR | S_IWGRP, 
gyro_selftest_dps_show, gyro_selftest_dps_store); static struct device_attribute *gyro_attrs[] = { &dev_attr_name, &dev_attr_vendor, &dev_attr_selftest, &dev_attr_power_on, &dev_attr_power_off, &dev_attr_temperature, &dev_attr_selftest_dps, NULL, }; void initialize_gyro_factorytest(struct ssp_data *data) { sensors_register(data->gyro_device, data, gyro_attrs, "gyro_sensor"); } void remove_gyro_factorytest(struct ssp_data *data) { sensors_unregister(data->gyro_device, gyro_attrs); }
gpl-2.0
Larspolo/linux
drivers/media/rc/gpio-ir-recv.c
532
6160
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <media/rc-core.h> #include <media/gpio-ir-recv.h> #define GPIO_IR_DRIVER_NAME "gpio-rc-recv" #define GPIO_IR_DEVICE_NAME "gpio_ir_recv" struct gpio_rc_dev { struct rc_dev *rcdev; int gpio_nr; bool active_low; }; #ifdef CONFIG_OF /* * Translate OpenFirmware node properties into platform_data */ static int gpio_ir_recv_get_devtree_pdata(struct device *dev, struct gpio_ir_recv_platform_data *pdata) { struct device_node *np = dev->of_node; enum of_gpio_flags flags; int gpio; gpio = of_get_gpio_flags(np, 0, &flags); if (gpio < 0) { if (gpio != -EPROBE_DEFER) dev_err(dev, "Failed to get gpio flags (%d)\n", gpio); return gpio; } pdata->gpio_nr = gpio; pdata->active_low = (flags & OF_GPIO_ACTIVE_LOW); /* probe() takes care of map_name == NULL or allowed_protos == 0 */ pdata->map_name = of_get_property(np, "linux,rc-map-name", NULL); pdata->allowed_protos = 0; return 0; } static const struct of_device_id gpio_ir_recv_of_match[] = { { .compatible = "gpio-ir-receiver", }, { }, }; MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match); #else /* !CONFIG_OF */ #define gpio_ir_recv_get_devtree_pdata(dev, pdata) (-ENOSYS) #endif static irqreturn_t gpio_ir_recv_irq(int irq, void *dev_id) { struct gpio_rc_dev *gpio_dev = 
dev_id;
	int gval;
	int rc = 0;
	enum raw_event_type type = IR_SPACE;

	gval = gpio_get_value(gpio_dev->gpio_nr);

	if (gval < 0)
		goto err_get_value;

	/* Normalise so gval == 1 always means a mark (pulse). */
	if (gpio_dev->active_low)
		gval = !gval;

	if (gval == 1)
		type = IR_PULSE;

	rc = ir_raw_event_store_edge(gpio_dev->rcdev, type);
	if (rc < 0)
		goto err_get_value;

	ir_raw_event_handle(gpio_dev->rcdev);

err_get_value:
	return IRQ_HANDLED;
}

static int gpio_ir_recv_probe(struct platform_device *pdev)
{
	struct gpio_rc_dev *gpio_dev;
	struct rc_dev *rcdev;
	const struct gpio_ir_recv_platform_data *pdata =
					pdev->dev.platform_data;
	int rc;

	/* A device-tree node takes precedence over board platform data. */
	if (pdev->dev.of_node) {
		struct gpio_ir_recv_platform_data *dtpdata =
			devm_kzalloc(&pdev->dev, sizeof(*dtpdata), GFP_KERNEL);
		if (!dtpdata)
			return -ENOMEM;
		rc = gpio_ir_recv_get_devtree_pdata(&pdev->dev, dtpdata);
		if (rc)
			return rc;
		pdata = dtpdata;
	}

	if (!pdata)
		return -EINVAL;

	if (pdata->gpio_nr < 0)
		return -EINVAL;

	gpio_dev = kzalloc(sizeof(struct gpio_rc_dev), GFP_KERNEL);
	if (!gpio_dev)
		return -ENOMEM;

	rcdev = rc_allocate_device();
	if (!rcdev) {
		rc = -ENOMEM;
		goto err_allocate_device;
	}

	rcdev->priv = gpio_dev;
	rcdev->driver_type = RC_DRIVER_IR_RAW;
	rcdev->input_name = GPIO_IR_DEVICE_NAME;
	rcdev->input_phys = GPIO_IR_DEVICE_NAME "/input0";
	rcdev->input_id.bustype = BUS_HOST;
	rcdev->input_id.vendor = 0x0001;
	rcdev->input_id.product = 0x0001;
	rcdev->input_id.version = 0x0100;
	rcdev->dev.parent = &pdev->dev;
	rcdev->driver_name = GPIO_IR_DRIVER_NAME;
	/* Restrict decodable protocols if the platform asked; else all. */
	if (pdata->allowed_protos)
		rcdev->allowed_protocols = pdata->allowed_protos;
	else
		rcdev->allowed_protocols = RC_BIT_ALL;
	rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;

	gpio_dev->rcdev = rcdev;
	gpio_dev->gpio_nr = pdata->gpio_nr;
	gpio_dev->active_low = pdata->active_low;

	rc = gpio_request(pdata->gpio_nr, "gpio-ir-recv");
	if (rc < 0)
		goto err_gpio_request;
	rc = gpio_direction_input(pdata->gpio_nr);
	if (rc < 0)
		goto err_gpio_direction_input;

	rc = rc_register_device(rcdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to register rc device\n");
		goto err_register_rc_device;
	}

	platform_set_drvdata(pdev, gpio_dev);

	/* Both edges matter: each transition delimits a mark or a space.
	 * request_any_context_irq() returns a positive IRQC_* value on
	 * success, hence the strict "< 0" check. */
	rc = request_any_context_irq(gpio_to_irq(pdata->gpio_nr),
				gpio_ir_recv_irq,
			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
					"gpio-ir-recv-irq", gpio_dev);
	if (rc < 0)
		goto err_request_irq;

	return 0;

err_request_irq:
	/* rcdev is NULLed so rc_free_device() below becomes a no-op -
	 * presumably rc_unregister_device() already releases the device;
	 * confirm against rc-core. */
	rc_unregister_device(rcdev);
	rcdev = NULL;
err_register_rc_device:
err_gpio_direction_input:
	gpio_free(pdata->gpio_nr);
err_gpio_request:
	rc_free_device(rcdev);
err_allocate_device:
	kfree(gpio_dev);
	return rc;
}

static int gpio_ir_recv_remove(struct platform_device *pdev)
{
	struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);

	/* Tear down in reverse order of probe. */
	free_irq(gpio_to_irq(gpio_dev->gpio_nr), gpio_dev);
	rc_unregister_device(gpio_dev->rcdev);
	gpio_free(gpio_dev->gpio_nr);
	kfree(gpio_dev);
	return 0;
}

#ifdef CONFIG_PM
static int gpio_ir_recv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);

	/* Either keep the irq armed as a wakeup source, or mute it. */
	if (device_may_wakeup(dev))
		enable_irq_wake(gpio_to_irq(gpio_dev->gpio_nr));
	else
		disable_irq(gpio_to_irq(gpio_dev->gpio_nr));

	return 0;
}

static int gpio_ir_recv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);

	/* Undo exactly what suspend did. */
	if (device_may_wakeup(dev))
		disable_irq_wake(gpio_to_irq(gpio_dev->gpio_nr));
	else
		enable_irq(gpio_to_irq(gpio_dev->gpio_nr));

	return 0;
}

static const struct dev_pm_ops gpio_ir_recv_pm_ops = {
	.suspend        = gpio_ir_recv_suspend,
	.resume         = gpio_ir_recv_resume,
};
#endif

static struct platform_driver gpio_ir_recv_driver = {
	.probe  = gpio_ir_recv_probe,
	.remove = gpio_ir_recv_remove,
	.driver = {
		.name   = GPIO_IR_DRIVER_NAME,
		.of_match_table = of_match_ptr(gpio_ir_recv_of_match),
#ifdef CONFIG_PM
		.pm	= &gpio_ir_recv_pm_ops,
#endif
	},
};
module_platform_driver(gpio_ir_recv_driver);

MODULE_DESCRIPTION("GPIO IR Receiver driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
locusf/linux
drivers/media/i2c/sr030pc30.c
532
20266
/* * Driver for SiliconFile SR030PC30 VGA (1/10-Inch) Image Sensor with ISP * * Copyright (C) 2010 Samsung Electronics Co., Ltd * Author: Sylwester Nawrocki, s.nawrocki@samsung.com * * Based on original driver authored by Dongsoo Nathaniel Kim * and HeungJun Kim <riverful.kim@samsung.com>. * * Based on mt9v011 Micron Digital Image Sensor driver * Copyright (c) 2009 Mauro Carvalho Chehab * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/i2c.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/v4l2-mediabus.h> #include <media/v4l2-ctrls.h> #include <media/sr030pc30.h> static int debug; module_param(debug, int, 0644); #define MODULE_NAME "SR030PC30" /* * Register offsets within a page * b15..b8 - page id, b7..b0 - register address */ #define POWER_CTRL_REG 0x0001 #define PAGEMODE_REG 0x03 #define DEVICE_ID_REG 0x0004 #define NOON010PC30_ID 0x86 #define SR030PC30_ID 0x8C #define VDO_CTL1_REG 0x0010 #define SUBSAMPL_NONE_VGA 0 #define SUBSAMPL_QVGA 0x10 #define SUBSAMPL_QQVGA 0x20 #define VDO_CTL2_REG 0x0011 #define SYNC_CTL_REG 0x0012 #define WIN_ROWH_REG 0x0020 #define WIN_ROWL_REG 0x0021 #define WIN_COLH_REG 0x0022 #define WIN_COLL_REG 0x0023 #define WIN_HEIGHTH_REG 0x0024 #define WIN_HEIGHTL_REG 0x0025 #define WIN_WIDTHH_REG 0x0026 #define WIN_WIDTHL_REG 0x0027 #define HBLANKH_REG 0x0040 #define HBLANKL_REG 0x0041 #define VSYNCH_REG 0x0042 #define VSYNCL_REG 0x0043 /* page 10 */ #define ISP_CTL_REG(n) (0x1010 + (n)) #define YOFS_REG 0x1040 #define DARK_YOFS_REG 0x1041 #define AG_ABRTH_REG 0x1050 #define SAT_CTL_REG 0x1060 #define BSAT_REG 0x1061 #define RSAT_REG 0x1062 #define AG_SAT_TH_REG 0x1063 /* page 11 */ #define ZLPF_CTRL_REG 0x1110 #define 
ZLPF_CTRL2_REG 0x1112 #define ZLPF_AGH_THR_REG 0x1121 #define ZLPF_THR_REG 0x1160 #define ZLPF_DYN_THR_REG 0x1160 /* page 12 */ #define YCLPF_CTL1_REG 0x1240 #define YCLPF_CTL2_REG 0x1241 #define YCLPF_THR_REG 0x1250 #define BLPF_CTL_REG 0x1270 #define BLPF_THR1_REG 0x1274 #define BLPF_THR2_REG 0x1275 /* page 14 - Lens Shading Compensation */ #define LENS_CTRL_REG 0x1410 #define LENS_XCEN_REG 0x1420 #define LENS_YCEN_REG 0x1421 #define LENS_R_COMP_REG 0x1422 #define LENS_G_COMP_REG 0x1423 #define LENS_B_COMP_REG 0x1424 /* page 15 - Color correction */ #define CMC_CTL_REG 0x1510 #define CMC_OFSGH_REG 0x1514 #define CMC_OFSGL_REG 0x1516 #define CMC_SIGN_REG 0x1517 /* Color correction coefficients */ #define CMC_COEF_REG(n) (0x1530 + (n)) /* Color correction offset coefficients */ #define CMC_OFS_REG(n) (0x1540 + (n)) /* page 16 - Gamma correction */ #define GMA_CTL_REG 0x1610 /* Gamma correction coefficients 0.14 */ #define GMA_COEF_REG(n) (0x1630 + (n)) /* page 20 - Auto Exposure */ #define AE_CTL1_REG 0x2010 #define AE_CTL2_REG 0x2011 #define AE_FRM_CTL_REG 0x2020 #define AE_FINE_CTL_REG(n) (0x2028 + (n)) #define EXP_TIMEH_REG 0x2083 #define EXP_TIMEM_REG 0x2084 #define EXP_TIMEL_REG 0x2085 #define EXP_MMINH_REG 0x2086 #define EXP_MMINL_REG 0x2087 #define EXP_MMAXH_REG 0x2088 #define EXP_MMAXM_REG 0x2089 #define EXP_MMAXL_REG 0x208A /* page 22 - Auto White Balance */ #define AWB_CTL1_REG 0x2210 #define AWB_ENABLE 0x80 #define AWB_CTL2_REG 0x2211 #define MWB_ENABLE 0x01 /* RGB gain control (manual WB) when AWB_CTL1[7]=0 */ #define AWB_RGAIN_REG 0x2280 #define AWB_GGAIN_REG 0x2281 #define AWB_BGAIN_REG 0x2282 #define AWB_RMAX_REG 0x2283 #define AWB_RMIN_REG 0x2284 #define AWB_BMAX_REG 0x2285 #define AWB_BMIN_REG 0x2286 /* R, B gain range in bright light conditions */ #define AWB_RMAXB_REG 0x2287 #define AWB_RMINB_REG 0x2288 #define AWB_BMAXB_REG 0x2289 #define AWB_BMINB_REG 0x228A /* manual white balance, when AWB_CTL2[0]=1 */ #define MWB_RGAIN_REG 0x22B2 #define 
MWB_BGAIN_REG 0x22B3 /* the token to mark an array end */ #define REG_TERM 0xFFFF /* Minimum and maximum exposure time in ms */ #define EXPOS_MIN_MS 1 #define EXPOS_MAX_MS 125 struct sr030pc30_info { struct v4l2_subdev sd; struct v4l2_ctrl_handler hdl; const struct sr030pc30_platform_data *pdata; const struct sr030pc30_format *curr_fmt; const struct sr030pc30_frmsize *curr_win; unsigned int hflip:1; unsigned int vflip:1; unsigned int sleep:1; struct { /* auto whitebalance control cluster */ struct v4l2_ctrl *awb; struct v4l2_ctrl *red; struct v4l2_ctrl *blue; }; struct { /* auto exposure control cluster */ struct v4l2_ctrl *autoexp; struct v4l2_ctrl *exp; }; u8 i2c_reg_page; }; struct sr030pc30_format { enum v4l2_mbus_pixelcode code; enum v4l2_colorspace colorspace; u16 ispctl1_reg; }; struct sr030pc30_frmsize { u16 width; u16 height; int vid_ctl1; }; struct i2c_regval { u16 addr; u16 val; }; /* supported resolutions */ static const struct sr030pc30_frmsize sr030pc30_sizes[] = { { .width = 640, .height = 480, .vid_ctl1 = SUBSAMPL_NONE_VGA, }, { .width = 320, .height = 240, .vid_ctl1 = SUBSAMPL_QVGA, }, { .width = 160, .height = 120, .vid_ctl1 = SUBSAMPL_QQVGA, }, }; /* supported pixel formats */ static const struct sr030pc30_format sr030pc30_formats[] = { { .code = V4L2_MBUS_FMT_YUYV8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x03, }, { .code = V4L2_MBUS_FMT_YVYU8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x02, }, { .code = V4L2_MBUS_FMT_VYUY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0, }, { .code = V4L2_MBUS_FMT_UYVY8_2X8, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x01, }, { .code = V4L2_MBUS_FMT_RGB565_2X8_BE, .colorspace = V4L2_COLORSPACE_JPEG, .ispctl1_reg = 0x40, }, }; static const struct i2c_regval sr030pc30_base_regs[] = { /* Window size and position within pixel matrix */ { WIN_ROWH_REG, 0x00 }, { WIN_ROWL_REG, 0x06 }, { WIN_COLH_REG, 0x00 }, { WIN_COLL_REG, 0x06 }, { WIN_HEIGHTH_REG, 0x01 }, { 
WIN_HEIGHTL_REG, 0xE0 }, { WIN_WIDTHH_REG, 0x02 }, { WIN_WIDTHL_REG, 0x80 }, { HBLANKH_REG, 0x01 }, { HBLANKL_REG, 0x50 }, { VSYNCH_REG, 0x00 }, { VSYNCL_REG, 0x14 }, { SYNC_CTL_REG, 0 }, /* Color corection and saturation */ { ISP_CTL_REG(0), 0x30 }, { YOFS_REG, 0x80 }, { DARK_YOFS_REG, 0x04 }, { AG_ABRTH_REG, 0x78 }, { SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 }, { AG_SAT_TH_REG, 0xF0 }, { 0x1064, 0x80 }, { CMC_CTL_REG, 0x03 }, { CMC_OFSGH_REG, 0x3C }, { CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x2F }, { CMC_COEF_REG(0), 0xCB }, { CMC_OFS_REG(0), 0x87 }, { CMC_COEF_REG(1), 0x61 }, { CMC_OFS_REG(1), 0x18 }, { CMC_COEF_REG(2), 0x16 }, { CMC_OFS_REG(2), 0x91 }, { CMC_COEF_REG(3), 0x23 }, { CMC_OFS_REG(3), 0x94 }, { CMC_COEF_REG(4), 0xCE }, { CMC_OFS_REG(4), 0x9f }, { CMC_COEF_REG(5), 0x2B }, { CMC_OFS_REG(5), 0x33 }, { CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x00 }, { CMC_COEF_REG(7), 0x34 }, { CMC_OFS_REG(7), 0x94 }, { CMC_COEF_REG(8), 0x75 }, { CMC_OFS_REG(8), 0x14 }, /* Color corection coefficients */ { GMA_CTL_REG, 0x03 }, { GMA_COEF_REG(0), 0x00 }, { GMA_COEF_REG(1), 0x19 }, { GMA_COEF_REG(2), 0x26 }, { GMA_COEF_REG(3), 0x3B }, { GMA_COEF_REG(4), 0x5D }, { GMA_COEF_REG(5), 0x79 }, { GMA_COEF_REG(6), 0x8E }, { GMA_COEF_REG(7), 0x9F }, { GMA_COEF_REG(8), 0xAF }, { GMA_COEF_REG(9), 0xBD }, { GMA_COEF_REG(10), 0xCA }, { GMA_COEF_REG(11), 0xDD }, { GMA_COEF_REG(12), 0xEC }, { GMA_COEF_REG(13), 0xF7 }, { GMA_COEF_REG(14), 0xFF }, /* Noise reduction, Z-LPF, YC-LPF and BLPF filters setup */ { ZLPF_CTRL_REG, 0x99 }, { ZLPF_CTRL2_REG, 0x0E }, { ZLPF_AGH_THR_REG, 0x29 }, { ZLPF_THR_REG, 0x0F }, { ZLPF_DYN_THR_REG, 0x63 }, { YCLPF_CTL1_REG, 0x23 }, { YCLPF_CTL2_REG, 0x3B }, { YCLPF_THR_REG, 0x05 }, { BLPF_CTL_REG, 0x1D }, { BLPF_THR1_REG, 0x05 }, { BLPF_THR2_REG, 0x04 }, /* Automatic white balance */ { AWB_CTL1_REG, 0xFB }, { AWB_CTL2_REG, 0x26 }, { AWB_RMAX_REG, 0x54 }, { AWB_RMIN_REG, 0x2B }, { AWB_BMAX_REG, 0x57 }, { AWB_BMIN_REG, 0x29 }, { AWB_RMAXB_REG, 0x50 }, { 
AWB_RMINB_REG, 0x43 }, { AWB_BMAXB_REG, 0x30 }, { AWB_BMINB_REG, 0x22 }, /* Auto exposure */ { AE_CTL1_REG, 0x8C }, { AE_CTL2_REG, 0x04 }, { AE_FRM_CTL_REG, 0x01 }, { AE_FINE_CTL_REG(0), 0x3F }, { AE_FINE_CTL_REG(1), 0xA3 }, { AE_FINE_CTL_REG(3), 0x34 }, /* Lens shading compensation */ { LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 }, { LENS_YCEN_REG, 0x70 }, { LENS_R_COMP_REG, 0x53 }, { LENS_G_COMP_REG, 0x40 }, { LENS_B_COMP_REG, 0x3e }, { REG_TERM, 0 }, }; static inline struct sr030pc30_info *to_sr030pc30(struct v4l2_subdev *sd) { return container_of(sd, struct sr030pc30_info, sd); } static inline int set_i2c_page(struct sr030pc30_info *info, struct i2c_client *client, unsigned int reg) { int ret = 0; u32 page = reg >> 8 & 0xFF; if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) { ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page); if (!ret) info->i2c_reg_page = page; } return ret; } static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); int ret = set_i2c_page(info, client, reg_addr); if (!ret) ret = i2c_smbus_read_byte_data(client, reg_addr & 0xFF); return ret; } static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); int ret = set_i2c_page(info, client, reg_addr); if (!ret) ret = i2c_smbus_write_byte_data( client, reg_addr & 0xFF, val); return ret; } static inline int sr030pc30_bulk_write_reg(struct v4l2_subdev *sd, const struct i2c_regval *msg) { while (msg->addr != REG_TERM) { int ret = cam_i2c_write(sd, msg->addr, msg->val); if (ret) return ret; msg++; } return 0; } /* Device reset and sleep mode control */ static int sr030pc30_pwr_ctrl(struct v4l2_subdev *sd, bool reset, bool sleep) { struct sr030pc30_info *info = to_sr030pc30(sd); u8 reg = sleep ? 
0xF1 : 0xF0; int ret = 0; if (reset) ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02); if (!ret) { ret = cam_i2c_write(sd, POWER_CTRL_REG, reg); if (!ret) { info->sleep = sleep; if (reset) info->i2c_reg_page = -1; } } return ret; } static int sr030pc30_set_flip(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); s32 reg = cam_i2c_read(sd, VDO_CTL2_REG); if (reg < 0) return reg; reg &= 0x7C; if (info->hflip) reg |= 0x01; if (info->vflip) reg |= 0x02; return cam_i2c_write(sd, VDO_CTL2_REG, reg | 0x80); } /* Configure resolution, color format and image flip */ static int sr030pc30_set_params(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; if (!info->curr_win) return -EINVAL; /* Configure the resolution through subsampling */ ret = cam_i2c_write(sd, VDO_CTL1_REG, info->curr_win->vid_ctl1); if (!ret && info->curr_fmt) ret = cam_i2c_write(sd, ISP_CTL_REG(0), info->curr_fmt->ispctl1_reg); if (!ret) ret = sr030pc30_set_flip(sd); return ret; } /* Find nearest matching image pixel size. */ static int sr030pc30_try_frame_size(struct v4l2_mbus_framefmt *mf) { unsigned int min_err = ~0; int i = ARRAY_SIZE(sr030pc30_sizes); const struct sr030pc30_frmsize *fsize = &sr030pc30_sizes[0], *match = NULL; while (i--) { int err = abs(fsize->width - mf->width) + abs(fsize->height - mf->height); if (err < min_err) { min_err = err; match = fsize; } fsize++; } if (match) { mf->width = match->width; mf->height = match->height; return 0; } return -EINVAL; } static int sr030pc30_s_ctrl(struct v4l2_ctrl *ctrl) { struct sr030pc30_info *info = container_of(ctrl->handler, struct sr030pc30_info, hdl); struct v4l2_subdev *sd = &info->sd; int ret = 0; v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n", __func__, ctrl->id, ctrl->val); switch (ctrl->id) { case V4L2_CID_AUTO_WHITE_BALANCE: if (ctrl->is_new) { ret = cam_i2c_write(sd, AWB_CTL2_REG, ctrl->val ? 0x2E : 0x2F); if (!ret) ret = cam_i2c_write(sd, AWB_CTL1_REG, ctrl->val ? 
0xFB : 0x7B); } if (!ret && info->blue->is_new) ret = cam_i2c_write(sd, MWB_BGAIN_REG, info->blue->val); if (!ret && info->red->is_new) ret = cam_i2c_write(sd, MWB_RGAIN_REG, info->red->val); return ret; case V4L2_CID_EXPOSURE_AUTO: /* auto anti-flicker is also enabled here */ if (ctrl->is_new) ret = cam_i2c_write(sd, AE_CTL1_REG, ctrl->val == V4L2_EXPOSURE_AUTO ? 0xDC : 0x0C); if (info->exp->is_new) { unsigned long expos = info->exp->val; expos = expos * info->pdata->clk_rate / (8 * 1000); if (!ret) ret = cam_i2c_write(sd, EXP_TIMEH_REG, expos >> 16 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_TIMEM_REG, expos >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_TIMEL_REG, expos & 0xFF); } return ret; default: return -EINVAL; } return 0; } static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index, enum v4l2_mbus_pixelcode *code) { if (!code || index >= ARRAY_SIZE(sr030pc30_formats)) return -EINVAL; *code = sr030pc30_formats[index].code; return 0; } static int sr030pc30_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; if (!mf) return -EINVAL; if (!info->curr_win || !info->curr_fmt) { ret = sr030pc30_set_params(sd); if (ret) return ret; } mf->width = info->curr_win->width; mf->height = info->curr_win->height; mf->code = info->curr_fmt->code; mf->colorspace = info->curr_fmt->colorspace; mf->field = V4L2_FIELD_NONE; return 0; } /* Return nearest media bus frame format. */ static const struct sr030pc30_format *try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { int i = ARRAY_SIZE(sr030pc30_formats); sr030pc30_try_frame_size(mf); while (i--) if (mf->code == sr030pc30_formats[i].code) break; mf->code = sr030pc30_formats[i].code; return &sr030pc30_formats[i]; } /* Return nearest media bus frame format. 
*/ static int sr030pc30_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { if (!sd || !mf) return -EINVAL; try_fmt(sd, mf); return 0; } static int sr030pc30_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) { struct sr030pc30_info *info = to_sr030pc30(sd); if (!sd || !mf) return -EINVAL; info->curr_fmt = try_fmt(sd, mf); return sr030pc30_set_params(sd); } static int sr030pc30_base_config(struct v4l2_subdev *sd) { struct sr030pc30_info *info = to_sr030pc30(sd); int ret; unsigned long expmin, expmax; ret = sr030pc30_bulk_write_reg(sd, sr030pc30_base_regs); if (!ret) { info->curr_fmt = &sr030pc30_formats[0]; info->curr_win = &sr030pc30_sizes[0]; ret = sr030pc30_set_params(sd); } if (!ret) ret = sr030pc30_pwr_ctrl(sd, false, false); if (!ret && !info->pdata) return ret; expmin = EXPOS_MIN_MS * info->pdata->clk_rate / (8 * 1000); expmax = EXPOS_MAX_MS * info->pdata->clk_rate / (8 * 1000); v4l2_dbg(1, debug, sd, "%s: expmin= %lx, expmax= %lx", __func__, expmin, expmax); /* Setting up manual exposure time range */ ret = cam_i2c_write(sd, EXP_MMINH_REG, expmin >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMINL_REG, expmin & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXH_REG, expmax >> 16 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXM_REG, expmax >> 8 & 0xFF); if (!ret) ret = cam_i2c_write(sd, EXP_MMAXL_REG, expmax & 0xFF); return ret; } static int sr030pc30_s_power(struct v4l2_subdev *sd, int on) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct sr030pc30_info *info = to_sr030pc30(sd); const struct sr030pc30_platform_data *pdata = info->pdata; int ret; if (pdata == NULL) { WARN(1, "No platform data!\n"); return -EINVAL; } /* * Put sensor into power sleep mode before switching off * power and disabling MCLK. 
*/ if (!on) sr030pc30_pwr_ctrl(sd, false, true); /* set_power controls sensor's power and clock */ if (pdata->set_power) { ret = pdata->set_power(&client->dev, on); if (ret) return ret; } if (on) { ret = sr030pc30_base_config(sd); } else { ret = 0; info->curr_win = NULL; info->curr_fmt = NULL; } return ret; } static const struct v4l2_ctrl_ops sr030pc30_ctrl_ops = { .s_ctrl = sr030pc30_s_ctrl, }; static const struct v4l2_subdev_core_ops sr030pc30_core_ops = { .s_power = sr030pc30_s_power, .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, .g_ctrl = v4l2_subdev_g_ctrl, .s_ctrl = v4l2_subdev_s_ctrl, .queryctrl = v4l2_subdev_queryctrl, .querymenu = v4l2_subdev_querymenu, }; static const struct v4l2_subdev_video_ops sr030pc30_video_ops = { .g_mbus_fmt = sr030pc30_g_fmt, .s_mbus_fmt = sr030pc30_s_fmt, .try_mbus_fmt = sr030pc30_try_fmt, .enum_mbus_fmt = sr030pc30_enum_fmt, }; static const struct v4l2_subdev_ops sr030pc30_ops = { .core = &sr030pc30_core_ops, .video = &sr030pc30_video_ops, }; /* * Detect sensor type. Return 0 if SR030PC30 was detected * or -ENODEV otherwise. */ static int sr030pc30_detect(struct i2c_client *client) { const struct sr030pc30_platform_data *pdata = client->dev.platform_data; int ret; /* Enable sensor's power and clock */ if (pdata->set_power) { ret = pdata->set_power(&client->dev, 1); if (ret) return ret; } ret = i2c_smbus_read_byte_data(client, DEVICE_ID_REG); if (pdata->set_power) pdata->set_power(&client->dev, 0); if (ret < 0) { dev_err(&client->dev, "%s: I2C read failed\n", __func__); return ret; } return ret == SR030PC30_ID ? 
0 : -ENODEV; } static int sr030pc30_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct sr030pc30_info *info; struct v4l2_subdev *sd; struct v4l2_ctrl_handler *hdl; const struct sr030pc30_platform_data *pdata = client->dev.platform_data; int ret; if (!pdata) { dev_err(&client->dev, "No platform data!"); return -EIO; } ret = sr030pc30_detect(client); if (ret) return ret; info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; sd = &info->sd; strcpy(sd->name, MODULE_NAME); info->pdata = client->dev.platform_data; v4l2_i2c_subdev_init(sd, client, &sr030pc30_ops); hdl = &info->hdl; v4l2_ctrl_handler_init(hdl, 6); info->awb = v4l2_ctrl_new_std(hdl, &sr030pc30_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); info->red = v4l2_ctrl_new_std(hdl, &sr030pc30_ctrl_ops, V4L2_CID_RED_BALANCE, 0, 127, 1, 64); info->blue = v4l2_ctrl_new_std(hdl, &sr030pc30_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 127, 1, 64); info->autoexp = v4l2_ctrl_new_std(hdl, &sr030pc30_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 0, 1, 1, 1); info->exp = v4l2_ctrl_new_std(hdl, &sr030pc30_ctrl_ops, V4L2_CID_EXPOSURE, EXPOS_MIN_MS, EXPOS_MAX_MS, 1, 30); sd->ctrl_handler = hdl; if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); return err; } v4l2_ctrl_auto_cluster(3, &info->awb, 0, false); v4l2_ctrl_auto_cluster(2, &info->autoexp, V4L2_EXPOSURE_MANUAL, false); v4l2_ctrl_handler_setup(hdl); info->i2c_reg_page = -1; info->hflip = 1; return 0; } static int sr030pc30_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); return 0; } static const struct i2c_device_id sr030pc30_id[] = { { MODULE_NAME, 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, sr030pc30_id); static struct i2c_driver sr030pc30_i2c_driver = { .driver = { .name = MODULE_NAME }, .probe = sr030pc30_probe, .remove = sr030pc30_remove, .id_table = sr030pc30_id, }; 
module_i2c_driver(sr030pc30_i2c_driver); MODULE_DESCRIPTION("Siliconfile SR030PC30 camera driver"); MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
PoonKang/Kernel_GT-N8013_JB
drivers/misc/mpu3050/accel/kxud9.c
532
3982
/* $License:
   Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
   $
 */

/**
 *  @defgroup   ACCELDL (Motion Library - Accelerometer Driver Layer)
 *  @brief      Provides the interface to setup and handle an accelerometers
 *              connected to the secondary I2C interface of the gyroscope.
 *
 *  @{
 *      @file   kxud9.c
 *      @brief  Accelerometer setup and handling methods.
 */

/* ------------------ */
/* - Include Files. - */
/* ------------------ */

#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/module.h>
#endif

#include "mpu.h"
#include "mlsl.h"
#include "mlos.h"

#include <log.h>
#undef MPL_LOG_TAG
#define MPL_LOG_TAG "MPL-acc"

/* --------------------- */
/* -    Variables.     - */
/* --------------------- */

/*****************************************
    Accelerometer Initialization Functions
*****************************************/

/*
 * kxud9_suspend - put the KXUD9 accelerometer into low-power standby.
 *
 * Writes 0x00 to register 0x0d (CTRL_REGB per the comment below;
 * NOTE(review): register name taken from the original comment — confirm
 * against the Kionix KXUD9 datasheet).
 *
 * Returns the MLSL serial-write result (0 on success); ERROR_CHECK may
 * return early on failure.
 */
static int kxud9_suspend(void *mlsl_handle,
			 struct ext_slave_descr *slave,
			 struct ext_slave_platform_data *pdata)
{
	int result;
	/* CTRL_REGB: low-power standby mode */
	result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
				       0x0d, 0x0);
	ERROR_CHECK(result);
	return result;
}

/* full scale setting - register and mask */
#define ACCEL_KIONIX_CTRL_REG (0x0C)
#define ACCEL_KIONIX_CTRL_MASK (0x3)

/*
 * kxud9_resume - configure full-scale range and bring the device out of
 * standby into normal operation.
 *
 * The requested range is taken from slave->range.mantissa; the actual
 * hardware scale is slightly different from the nominal value, so
 * slave->range.fraction is updated to report the true scale back to the
 * caller (e.g. nominal 4g is really 4.9951g).
 *
 * Returns ML_SUCCESS on success; ERROR_CHECK may return early on failure.
 */
static int kxud9_resume(void *mlsl_handle,
			struct ext_slave_descr *slave,
			struct ext_slave_platform_data *pdata)
{
	int result = ML_SUCCESS;
	unsigned char reg;

	/* Full Scale */
	/* NOTE(review): the next three statements always leave reg == 0
	 * before the range bits are OR-ed in; kept as-is to document the
	 * register layout (clear the 2-bit full-scale field first). */
	reg = 0x0;
	reg &= ~ACCEL_KIONIX_CTRL_MASK;
	reg |= 0x00;
	if (slave->range.mantissa == 4) {
		/* 4g scale = 4.9951 */
		reg |= 0x2;
		slave->range.fraction = 9951;
	} else if (slave->range.mantissa == 7) {
		/* 6g scale = 7.5018 */
		reg |= 0x1;
		slave->range.fraction = 5018;
	} else if (slave->range.mantissa == 9) {
		/* 8g scale = 9.9902 */
		reg |= 0x0;
		slave->range.fraction = 9902;
	} else {
		/* default: 2g scale = 2.5006 */
		slave->range.mantissa = 2;
		slave->range.fraction = 5006;
		reg |= 0x3;
	}
	reg |= 0xC0;		/* 100Hz LPF */
	result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
				       ACCEL_KIONIX_CTRL_REG, reg);
	ERROR_CHECK(result);
	/* normal operation: leave standby (write 0x40 to reg 0x0d) */
	result = MLSLSerialWriteSingle(mlsl_handle, pdata->address,
				       0x0d, 0x40);
	ERROR_CHECK(result);

	return ML_SUCCESS;
}

/*
 * kxud9_read - read slave->len bytes of sensor data starting at
 * slave->reg (0x00, 6 bytes per the descriptor below) into @data.
 *
 * Returns the MLSL serial-read result (0 on success).
 */
static int kxud9_read(void *mlsl_handle,
		      struct ext_slave_descr *slave,
		      struct ext_slave_platform_data *pdata,
		      unsigned char *data)
{
	int result;
	result = MLSLSerialRead(mlsl_handle, pdata->address,
				slave->reg, slave->len, data);
	return result;
}

/*
 * Slave descriptor registered with the MPU3050 secondary-I2C framework.
 * NOTE(review): .id deliberately uses ACCEL_ID_KXSD9 rather than a
 * KXUD9-specific id (the commented-out ACCEL_ID_KXUD9 suggests the two
 * parts are treated as compatible) — confirm against mpu.h.
 */
static struct ext_slave_descr kxud9_descr = {
	/*.init             = */ NULL,
	/*.exit             = */ NULL,
	/*.suspend          = */ kxud9_suspend,
	/*.resume           = */ kxud9_resume,
	/*.read             = */ kxud9_read,
	/*.config           = */ NULL,
	/*.get_config       = */ NULL,
	/*.name             = */ "kxud9",
	/*.type             = */ EXT_SLAVE_TYPE_ACCELEROMETER,
	/*.id               = */ /* ACCEL_ID_KXUD9, */ ACCEL_ID_KXSD9,
	/*.reg              = */ 0x00,
	/*.len              = */ 6,
	/*.endian           = */ EXT_SLAVE_BIG_ENDIAN,
	/*.range            = */ {2, 5006},
};

/* Accessor returning the (static) slave descriptor for this driver. */
struct ext_slave_descr *kxud9_get_slave_descr(void)
{
	return &kxud9_descr;
}
EXPORT_SYMBOL(kxud9_get_slave_descr);

/**
 *  @}
 **/
gpl-2.0
rmcc/commtiva-kernel-z71
fs/ext2/xattr_security.c
1044
1859
/* * linux/fs/ext2/xattr_security.c * Handler for storing security labels as extended attributes. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/ext2_fs.h> #include <linux/security.h> #include "xattr.h" static size_t ext2_xattr_security_list(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { const int prefix_len = XATTR_SECURITY_PREFIX_LEN; const size_t total_len = prefix_len + name_len + 1; if (list && total_len <= list_size) { memcpy(list, XATTR_SECURITY_PREFIX, prefix_len); memcpy(list+prefix_len, name, name_len); list[prefix_len + name_len] = '\0'; } return total_len; } static int ext2_xattr_security_get(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { if (strcmp(name, "") == 0) return -EINVAL; return ext2_xattr_get(dentry->d_inode, EXT2_XATTR_INDEX_SECURITY, name, buffer, size); } static int ext2_xattr_security_set(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { if (strcmp(name, "") == 0) return -EINVAL; return ext2_xattr_set(dentry->d_inode, EXT2_XATTR_INDEX_SECURITY, name, value, size, flags); } int ext2_init_security(struct inode *inode, struct inode *dir) { int err; size_t len; void *value; char *name; err = security_inode_init_security(inode, dir, &name, &value, &len); if (err) { if (err == -EOPNOTSUPP) return 0; return err; } err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY, name, value, len, 0); kfree(name); kfree(value); return err; } const struct xattr_handler ext2_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .list = ext2_xattr_security_list, .get = ext2_xattr_security_get, .set = ext2_xattr_security_set, };
gpl-2.0
IMCG/fastsocket
kernel/net/irda/parameters.c
1300
15705
/********************************************************************* * * Filename: parameters.c * Version: 1.0 * Description: A more general way to handle (pi,pl,pv) parameters * Status: Experimental. * Author: Dag Brattli <dagb@cs.uit.no> * Created at: Mon Jun 7 10:25:11 1999 * Modified at: Sun Jan 30 14:08:39 2000 * Modified by: Dag Brattli <dagb@cs.uit.no> * * Copyright (c) 1999-2000 Dag Brattli, All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA * ********************************************************************/ #include <linux/types.h> #include <linux/module.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <net/irda/irda.h> #include <net/irda/parameters.h> static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); static int irda_extract_no_value(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); static int irda_insert_no_value(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func); static 
int irda_param_unpack(__u8 *buf, char *fmt, ...); /* Parameter value call table. Must match PV_TYPE */ static PV_HANDLER pv_extract_table[] = { irda_extract_integer, /* Handler for any length integers */ irda_extract_integer, /* Handler for 8 bits integers */ irda_extract_integer, /* Handler for 16 bits integers */ irda_extract_string, /* Handler for strings */ irda_extract_integer, /* Handler for 32 bits integers */ irda_extract_octseq, /* Handler for octet sequences */ irda_extract_no_value /* Handler for no value parameters */ }; static PV_HANDLER pv_insert_table[] = { irda_insert_integer, /* Handler for any length integers */ irda_insert_integer, /* Handler for 8 bits integers */ irda_insert_integer, /* Handler for 16 bits integers */ NULL, /* Handler for strings */ irda_insert_integer, /* Handler for 32 bits integers */ NULL, /* Handler for octet sequences */ irda_insert_no_value /* Handler for no value parameters */ }; /* * Function irda_insert_no_value (self, buf, len, pi, type, func) */ static int irda_insert_no_value(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func) { irda_param_t p; int ret; p.pi = pi; p.pl = 0; /* Call handler for this parameter */ ret = (*func)(self, &p, PV_GET); /* Extract values anyway, since handler may need them */ irda_param_pack(buf, "bb", p.pi, p.pl); if (ret < 0) return ret; return 2; /* Inserted pl+2 bytes */ } /* * Function irda_extract_no_value (self, buf, len, type, func) * * Extracts a parameter without a pv field (pl=0) * */ static int irda_extract_no_value(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func) { irda_param_t p; int ret; /* Extract values anyway, since handler may need them */ irda_param_unpack(buf, "bb", &p.pi, &p.pl); /* Call handler for this parameter */ ret = (*func)(self, &p, PV_PUT); if (ret < 0) return ret; return 2; /* Extracted pl+2 bytes */ } /* * Function irda_insert_integer (self, buf, len, pi, type, func) */ static int irda_insert_integer(void *self, __u8 
*buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func) { irda_param_t p; int n = 0; int err; p.pi = pi; /* In case handler needs to know */ p.pl = type & PV_MASK; /* The integer type codes the length as well */ p.pv.i = 0; /* Clear value */ /* Call handler for this parameter */ err = (*func)(self, &p, PV_GET); if (err < 0) return err; /* * If parameter length is still 0, then (1) this is an any length * integer, and (2) the handler function does not care which length * we choose to use, so we pick the one the gives the fewest bytes. */ if (p.pl == 0) { if (p.pv.i < 0xff) { IRDA_DEBUG(2, "%s(), using 1 byte\n", __func__); p.pl = 1; } else if (p.pv.i < 0xffff) { IRDA_DEBUG(2, "%s(), using 2 bytes\n", __func__); p.pl = 2; } else { IRDA_DEBUG(2, "%s(), using 4 bytes\n", __func__); p.pl = 4; /* Default length */ } } /* Check if buffer is long enough for insertion */ if (len < (2+p.pl)) { IRDA_WARNING("%s: buffer too short for insertion!\n", __func__); return -1; } IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__, p.pi, p.pl, p.pv.i); switch (p.pl) { case 1: n += irda_param_pack(buf, "bbb", p.pi, p.pl, (__u8) p.pv.i); break; case 2: if (type & PV_BIG_ENDIAN) p.pv.i = cpu_to_be16((__u16) p.pv.i); else p.pv.i = cpu_to_le16((__u16) p.pv.i); n += irda_param_pack(buf, "bbs", p.pi, p.pl, (__u16) p.pv.i); break; case 4: if (type & PV_BIG_ENDIAN) cpu_to_be32s(&p.pv.i); else cpu_to_le32s(&p.pv.i); n += irda_param_pack(buf, "bbi", p.pi, p.pl, p.pv.i); break; default: IRDA_WARNING("%s: length %d not supported\n", __func__, p.pl); /* Skip parameter */ return -1; } return p.pl+2; /* Inserted pl+2 bytes */ } /* * Function irda_extract integer (self, buf, len, pi, type, func) * * Extract a possibly variable length integer from buffer, and call * handler for processing of the parameter */ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func) { irda_param_t p; int n = 0; int extract_len; /* Real length we extract */ int err; p.pi = 
pi; /* In case handler needs to know */ p.pl = buf[1]; /* Extract length of value */ p.pv.i = 0; /* Clear value */ extract_len = p.pl; /* Default : extract all */ /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { IRDA_WARNING("%s: buffer too short for parsing! " "Need %d bytes, but len is only %d\n", __func__, p.pl, len); return -1; } /* * Check that the integer length is what we expect it to be. If the * handler want a 16 bits integer then a 32 bits is not good enough * PV_INTEGER means that the handler is flexible. */ if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) { IRDA_ERROR("%s: invalid parameter length! " "Expected %d bytes, but value had %d bytes!\n", __func__, type & PV_MASK, p.pl); /* Most parameters are bit/byte fields or little endian, * so it's ok to only extract a subset of it (the subset * that the handler expect). This is necessary, as some * broken implementations seems to add extra undefined bits. * If the parameter is shorter than we expect or is big * endian, we can't play those tricks. 
Jean II */ if((p.pl < (type & PV_MASK)) || (type & PV_BIG_ENDIAN)) { /* Skip parameter */ return p.pl+2; } else { /* Extract subset of it, fallthrough */ extract_len = type & PV_MASK; } } switch (extract_len) { case 1: n += irda_param_unpack(buf+2, "b", &p.pv.i); break; case 2: n += irda_param_unpack(buf+2, "s", &p.pv.i); if (type & PV_BIG_ENDIAN) p.pv.i = be16_to_cpu((__u16) p.pv.i); else p.pv.i = le16_to_cpu((__u16) p.pv.i); break; case 4: n += irda_param_unpack(buf+2, "i", &p.pv.i); if (type & PV_BIG_ENDIAN) be32_to_cpus(&p.pv.i); else le32_to_cpus(&p.pv.i); break; default: IRDA_WARNING("%s: length %d not supported\n", __func__, p.pl); /* Skip parameter */ return p.pl+2; } IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__, p.pi, p.pl, p.pv.i); /* Call handler for this parameter */ err = (*func)(self, &p, PV_PUT); if (err < 0) return err; return p.pl+2; /* Extracted pl+2 bytes */ } /* * Function irda_extract_string (self, buf, len, type, func) */ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func) { char str[33]; irda_param_t p; int err; IRDA_DEBUG(2, "%s()\n", __func__); p.pi = pi; /* In case handler needs to know */ p.pl = buf[1]; /* Extract length of value */ IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__, p.pi, p.pl); /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { IRDA_WARNING("%s: buffer too short for parsing! 
" "Need %d bytes, but len is only %d\n", __func__, p.pl, len); return -1; } /* Should be safe to copy string like this since we have already * checked that the buffer is long enough */ strncpy(str, buf+2, p.pl); IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __func__, (__u8) str[0], (__u8) str[1]); /* Null terminate string */ str[p.pl+1] = '\0'; p.pv.c = str; /* Handler will need to take a copy */ /* Call handler for this parameter */ err = (*func)(self, &p, PV_PUT); if (err < 0) return err; return p.pl+2; /* Extracted pl+2 bytes */ } /* * Function irda_extract_octseq (self, buf, len, type, func) */ static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi, PV_TYPE type, PI_HANDLER func) { irda_param_t p; p.pi = pi; /* In case handler needs to know */ p.pl = buf[1]; /* Extract length of value */ /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { IRDA_WARNING("%s: buffer too short for parsing! " "Need %d bytes, but len is only %d\n", __func__, p.pl, len); return -1; } IRDA_DEBUG(0, "%s(), not impl\n", __func__); return p.pl+2; /* Extracted pl+2 bytes */ } /* * Function irda_param_pack (skb, fmt, ...) * * Format: * 'i' = 32 bits integer * 's' = string * */ int irda_param_pack(__u8 *buf, char *fmt, ...) { irda_pv_t arg; va_list args; char *p; int n = 0; va_start(args, fmt); for (p = fmt; *p != '\0'; p++) { switch (*p) { case 'b': /* 8 bits unsigned byte */ buf[n++] = (__u8)va_arg(args, int); break; case 's': /* 16 bits unsigned short */ arg.i = (__u16)va_arg(args, int); put_unaligned((__u16)arg.i, (__u16 *)(buf+n)); n+=2; break; case 'i': /* 32 bits unsigned integer */ arg.i = va_arg(args, __u32); put_unaligned(arg.i, (__u32 *)(buf+n)); n+=4; break; #if 0 case 'c': /* \0 terminated string */ arg.c = va_arg(args, char *); strcpy(buf+n, arg.c); n += strlen(arg.c) + 1; break; #endif default: va_end(args); return -1; } } va_end(args); return 0; } EXPORT_SYMBOL(irda_param_pack); /* * Function irda_param_unpack (skb, fmt, ...) 
 */
/*
 * Function irda_param_unpack (buf, fmt, ...)
 *
 *    Unpack fields from a byte buffer according to a printf-like format
 *    string: 'b' = 8-bit byte, 's' = 16-bit short, 'i' = 32-bit integer.
 *    Every destination argument is passed as a __u32 * regardless of the
 *    field width.  Returns 0 on success, -1 on an unknown format char.
 */
static int irda_param_unpack(__u8 *buf, char *fmt, ...)
{
	irda_pv_t arg;		/* Current destination for the value */
	va_list args;
	char *p;
	int n = 0;		/* Running offset into buf */

	va_start(args, fmt);

	for (p = fmt; *p != '\0'; p++) {
		switch (*p) {
		case 'b':	/* 8 bits byte */
			arg.ip = va_arg(args, __u32 *);
			*arg.ip = buf[n++];
			break;
		case 's':	/* 16 bits short */
			arg.ip = va_arg(args, __u32 *);
			/* buf has no alignment guarantee; use safe accessor */
			*arg.ip = get_unaligned((__u16 *)(buf+n));
			n+=2;
			break;
		case 'i':	/* 32 bits unsigned integer */
			arg.ip = va_arg(args, __u32 *);
			*arg.ip = get_unaligned((__u32 *)(buf+n));
			n+=4;
			break;
#if 0
		case 'c': /* \0 terminated string */
			arg.c = va_arg(args, char *);
			strcpy(arg.c, buf+n);
			n += strlen(arg.c) + 1;
			break;
#endif
		default:
			va_end(args);
			return -1;
		}
	}
	va_end(args);

	return 0;
}

/*
 * Function irda_param_insert (self, pi, buf, len, info)
 *
 *    Insert the specified parameter (pi) into buffer. Returns number of
 *    bytes inserted
 */
int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len,
		      pi_param_info_t *info)
{
	pi_minor_info_t *pi_minor_info;
	__u8 pi_minor;
	__u8 pi_major;
	int type;
	int ret = -1;
	int n = 0;

	IRDA_ASSERT(buf != NULL, return ret;);
	IRDA_ASSERT(info != NULL, return ret;);

	/* Split the identifier into major/minor table indices */
	pi_minor = pi & info->pi_mask;
	pi_major = pi >> info->pi_major_offset;

	/* Check if the identifier value (pi) is valid */
	if ((pi_major > info->len-1) ||
	    (pi_minor > info->tables[pi_major].len-1)) {
		IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n",
			   __func__, pi);

		/* Skip this parameter */
		return -1;
	}

	/* Lookup the info on how to parse this parameter */
	pi_minor_info = &info->tables[pi_major].pi_minor_call_table[pi_minor];

	/* Find expected data type for this parameter identifier (pi)*/
	type = pi_minor_info->type;

	/*  Check if handler has been implemented */
	if (!pi_minor_info->func) {
		IRDA_MESSAGE("%s: no handler for pi=%#x\n", __func__, pi);
		/* Skip this parameter */
		return -1;
	}

	/* Insert parameter value; low bits of type select the insert helper */
	ret = (*pv_insert_table[type & PV_MASK])(self, buf+n, len, pi, type,
						 pi_minor_info->func);
	return ret;
}
EXPORT_SYMBOL(irda_param_insert);

/*
 * Function irda_param_extract (self, buf, len, info)
 *
 *    Parse all parameters. If len is correct, then everything should be
 *    safe. Returns the number of bytes that was parsed
 *
 */
static int irda_param_extract(void *self, __u8 *buf, int len,
			      pi_param_info_t *info)
{
	pi_minor_info_t *pi_minor_info;
	__u8 pi_minor;
	__u8 pi_major;
	int type;
	int ret = -1;
	int n = 0;

	IRDA_ASSERT(buf != NULL, return ret;);
	IRDA_ASSERT(info != NULL, return ret;);

	/* buf[0] is the parameter identifier, buf[1] its length */
	pi_minor = buf[n] & info->pi_mask;
	pi_major = buf[n] >> info->pi_major_offset;

	/* Check if the identifier value (pi) is valid */
	if ((pi_major > info->len-1) ||
	    (pi_minor > info->tables[pi_major].len-1)) {
		IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n",
			   __func__, buf[0]);

		/* Skip this parameter: 2-byte header plus the length byte */
		return 2 + buf[n + 1];  /* Continue */
	}

	/* Lookup the info on how to parse this parameter */
	pi_minor_info = &info->tables[pi_major].pi_minor_call_table[pi_minor];

	/* Find expected data type for this parameter identifier (pi)*/
	type = pi_minor_info->type;

	IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __func__,
		   pi_major, pi_minor, type);

	/*  Check if handler has been implemented */
	if (!pi_minor_info->func) {
		IRDA_MESSAGE("%s: no handler for pi=%#x\n", __func__, buf[n]);
		/* Skip this parameter */
		return 2 + buf[n + 1]; /* Continue */
	}

	/* Parse parameter value */
	ret = (*pv_extract_table[type & PV_MASK])(self, buf+n, len, buf[n],
						  type, pi_minor_info->func);
	return ret;
}

/*
 * Function irda_param_extract_all (self, buf, len, info)
 *
 *    Parse all parameters. If len is correct, then everything should be
 *    safe. Returns the number of bytes that was parsed
 *
 */
int irda_param_extract_all(void *self, __u8 *buf, int len,
			   pi_param_info_t *info)
{
	int ret = -1;
	int n = 0;

	IRDA_ASSERT(buf != NULL, return ret;);
	IRDA_ASSERT(info != NULL, return ret;);

	/*
	 * Parse all parameters. Each parameter must be at least two bytes
	 * long or else there is no point in trying to parse it
	 */
	while (len > 2) {
		ret = irda_param_extract(self, buf+n, len, info);
		if (ret < 0)
			return ret;

		n += ret;
		len -= ret;
	}
	return n;
}
EXPORT_SYMBOL(irda_param_extract_all);
gpl-2.0
magyarm/bluetooth-next
drivers/uio/uio_mf624.c
2068
6223
/*
 * UIO driver for Humusoft MF624 DAQ card.
 * Copyright (C) 2011 Rostislav Lisovy <lisovy@gmail.com>,
 *                    Czech Technical University in Prague
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/uio_driver.h>

#define PCI_VENDOR_ID_HUMUSOFT		0x186c
#define PCI_DEVICE_ID_MF624		0x0624
#define PCI_SUBVENDOR_ID_HUMUSOFT	0x186c
#define PCI_SUBDEVICE_DEVICE		0x0624

/* BAR0 Interrupt control/status register */
#define INTCSR				0x4C
#define INTCSR_ADINT_ENABLE		(1 << 0)
#define INTCSR_CTR4INT_ENABLE		(1 << 3)
#define INTCSR_PCIINT_ENABLE		(1 << 6)
#define INTCSR_ADINT_STATUS		(1 << 2)
#define INTCSR_CTR4INT_STATUS		(1 << 5)

enum mf624_interrupt_source {ADC, CTR4, ALL};

/*
 * Mask the given interrupt source(s) in INTCSR.  The global PCI interrupt
 * enable bit is always cleared along with the per-source enable bit.
 */
static void mf624_disable_interrupt(enum mf624_interrupt_source source,
				    struct uio_info *info)
{
	void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;

	switch (source) {
	case ADC:
		iowrite32(ioread32(INTCSR_reg)
			& ~(INTCSR_ADINT_ENABLE | INTCSR_PCIINT_ENABLE),
			INTCSR_reg);
		break;

	case CTR4:
		iowrite32(ioread32(INTCSR_reg)
			& ~(INTCSR_CTR4INT_ENABLE | INTCSR_PCIINT_ENABLE),
			INTCSR_reg);
		break;

	case ALL:
	default:
		iowrite32(ioread32(INTCSR_reg)
			& ~(INTCSR_ADINT_ENABLE | INTCSR_CTR4INT_ENABLE
			  | INTCSR_PCIINT_ENABLE),
			INTCSR_reg);
		break;
	}
}

/*
 * Unmask the given interrupt source(s) in INTCSR, also setting the global
 * PCI interrupt enable bit.
 */
static void mf624_enable_interrupt(enum mf624_interrupt_source source,
				   struct uio_info *info)
{
	void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;

	switch (source) {
	case ADC:
		iowrite32(ioread32(INTCSR_reg)
			| INTCSR_ADINT_ENABLE | INTCSR_PCIINT_ENABLE,
			INTCSR_reg);
		break;

	case CTR4:
		iowrite32(ioread32(INTCSR_reg)
			| INTCSR_CTR4INT_ENABLE | INTCSR_PCIINT_ENABLE,
			INTCSR_reg);
		break;

	case ALL:
	default:
		iowrite32(ioread32(INTCSR_reg)
			| INTCSR_ADINT_ENABLE | INTCSR_CTR4INT_ENABLE
			| INTCSR_PCIINT_ENABLE,
			INTCSR_reg);
		break;
	}
}

/*
 * Shared-IRQ handler: claim the interrupt only when one of our sources is
 * both enabled and pending, and mask that source (userspace re-enables it
 * via the irqcontrol hook).
 */
static irqreturn_t mf624_irq_handler(int irq, struct uio_info *info)
{
	void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;

	if ((ioread32(INTCSR_reg) & INTCSR_ADINT_ENABLE)
	    && (ioread32(INTCSR_reg) & INTCSR_ADINT_STATUS)) {
		mf624_disable_interrupt(ADC, info);
		return IRQ_HANDLED;
	}

	if ((ioread32(INTCSR_reg) & INTCSR_CTR4INT_ENABLE)
	    && (ioread32(INTCSR_reg) & INTCSR_CTR4INT_STATUS)) {
		mf624_disable_interrupt(CTR4, info);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/* UIO irqcontrol hook: userspace writes 0/1 to mask/unmask all sources. */
static int mf624_irqcontrol(struct uio_info *info, s32 irq_on)
{
	if (irq_on == 0)
		mf624_disable_interrupt(ALL, info);
	else if (irq_on == 1)
		mf624_enable_interrupt(ALL, info);

	return 0;
}

/*
 * Probe: enable the PCI device, map BAR0/BAR2/BAR4 and register the
 * UIO device.  Unwinds all acquired resources on any failure.
 */
static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct uio_info *info;

	info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (pci_enable_device(dev))
		goto out_free;

	if (pci_request_regions(dev, "mf624"))
		goto out_disable;

	info->name = "mf624";
	info->version = "0.0.1";

	/* Note: Datasheet says device uses BAR0, BAR1, BAR2 -- do not trust it */

	/* BAR0 */
	info->mem[0].name = "PCI chipset, interrupts, status "
			"bits, special functions";
	info->mem[0].addr = pci_resource_start(dev, 0);
	if (!info->mem[0].addr)
		goto out_release;
	info->mem[0].size = pci_resource_len(dev, 0);
	info->mem[0].memtype = UIO_MEM_PHYS;
	info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
	if (!info->mem[0].internal_addr)
		goto out_release;

	/* BAR2 */
	info->mem[1].name = "ADC, DAC, DIO";
	info->mem[1].addr = pci_resource_start(dev, 2);
	if (!info->mem[1].addr)
		goto out_unmap0;
	info->mem[1].size = pci_resource_len(dev, 2);
	info->mem[1].memtype = UIO_MEM_PHYS;
	info->mem[1].internal_addr = pci_ioremap_bar(dev, 2);
	if (!info->mem[1].internal_addr)
		goto out_unmap0;

	/* BAR4 */
	info->mem[2].name = "Counter/timer chip";
	info->mem[2].addr = pci_resource_start(dev, 4);
	if (!info->mem[2].addr)
		goto out_unmap1;
	info->mem[2].size = pci_resource_len(dev, 4);
	info->mem[2].memtype = UIO_MEM_PHYS;
	info->mem[2].internal_addr = pci_ioremap_bar(dev, 4);
	if (!info->mem[2].internal_addr)
		goto out_unmap1;

	info->irq = dev->irq;
	info->irq_flags = IRQF_SHARED;
	info->handler = mf624_irq_handler;

	info->irqcontrol = mf624_irqcontrol;

	if (uio_register_device(&dev->dev, info))
		goto out_unmap2;

	pci_set_drvdata(dev, info);

	return 0;

out_unmap2:
	iounmap(info->mem[2].internal_addr);
out_unmap1:
	iounmap(info->mem[1].internal_addr);
out_unmap0:
	iounmap(info->mem[0].internal_addr);
out_release:
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_free:
	kfree(info);
	return -ENODEV;
}

/* Remove: mask interrupts, unregister UIO and release all PCI resources. */
static void mf624_pci_remove(struct pci_dev *dev)
{
	struct uio_info *info = pci_get_drvdata(dev);

	mf624_disable_interrupt(ALL, info);

	uio_unregister_device(info);
	pci_release_regions(dev);
	pci_disable_device(dev);

	iounmap(info->mem[0].internal_addr);
	iounmap(info->mem[1].internal_addr);
	iounmap(info->mem[2].internal_addr);

	kfree(info);
}

static const struct pci_device_id mf624_pci_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUMUSOFT, PCI_DEVICE_ID_MF624) },
	{ 0, }
};

static struct pci_driver mf624_pci_driver = {
	.name = "mf624",
	.id_table = mf624_pci_id,
	.probe = mf624_pci_probe,
	.remove = mf624_pci_remove,
};
MODULE_DEVICE_TABLE(pci, mf624_pci_id);

module_pci_driver(mf624_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rostislav Lisovy <lisovy@gmail.com>");
gpl-2.0
syhost/android_kernel_zte_n918st
arch/mips/sni/setup.c
2068
5581
/*
 * Setup pointers to hardware-dependent routines.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 98, 2000, 03, 04, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2006,2007 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 */
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/screen_info.h>

#ifdef CONFIG_FW_ARC
#include <asm/fw/arc/types.h>
#include <asm/sgialib.h>
#endif
#ifdef CONFIG_FW_SNIPROM
#include <asm/mipsprom.h>
#endif
#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/reboot.h>
#include <asm/sni.h>

/* Board type byte read from the ID PROM; consumed by other SNI code. */
unsigned int sni_brd_type;
EXPORT_SYMBOL(sni_brd_type);

extern void sni_machine_restart(char *command);
extern void sni_machine_power_off(void);

/*
 * On ARC-firmware systems, seed screen_info with the console geometry
 * reported by the ARC display status call so the VGA console starts
 * with the firmware's cursor position and dimensions.
 */
static void __init sni_display_setup(void)
{
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) && defined(CONFIG_FW_ARC)
	struct screen_info *si = &screen_info;
	DISPLAY_STATUS *di;

	di = ArcGetDisplayStatus(1);

	if (di) {
		si->orig_x		= di->CursorXPosition;
		si->orig_y		= di->CursorYPosition;
		si->orig_video_cols	= di->CursorMaxXPosition;
		si->orig_video_lines	= di->CursorMaxYPosition;
		si->orig_video_isVGA	= VIDEO_TYPE_VGAC;
		si->orig_video_points	= 16;
	}
#endif
}

/*
 * On SNI-PROM systems, pick the preferred serial console from the
 * firmware environment ("console_dev"/"console" plus the matching baud
 * variable) and hand it to the console layer.
 *
 * NOTE(review): prom_getenv() results (cdev, ctype) are dereferenced
 * without a NULL check — presumably the firmware always provides these
 * variables; verify against the SNI PROM environment.
 */
static void __init sni_console_setup(void)
{
#ifndef CONFIG_FW_ARC
	char *ctype;
	char *cdev;
	char *baud;
	int port;
	static char options[8] __initdata;

	cdev = prom_getenv("console_dev");
	if (strncmp(cdev, "tty", 3) == 0) {
		ctype = prom_getenv("console");
		switch (*ctype) {
		default:
		case 'l':
			port = 0;
			baud = prom_getenv("lbaud");
			break;
		case 'r':
			port = 1;
			baud = prom_getenv("rbaud");
			break;
		}
		if (baud)
			strcpy(options, baud);
		if (strncmp(cdev, "tty552", 6) == 0)
			add_preferred_console("ttyS", port,
					      baud ? options : NULL);
		else
			add_preferred_console("ttySC", port,
					      baud ? options : NULL);
	}
#endif
}

#ifdef DEBUG
/* Hex-dump the 256-byte ID PROM, 16 bytes per row. */
static void __init sni_idprom_dump(void)
{
	int i;

	pr_debug("SNI IDProm dump:\n");
	for (i = 0; i < 256; i++) {
		if (i%16 == 0)
			pr_debug("%04x ", i);

		printk("%02x ", *(unsigned char *) (SNI_IDPROM_BASE + i));

		if (i % 16 == 15)
			printk("\n");
	}
}
#endif

/*
 * Board-level platform setup: identify the board from the ID PROM,
 * derive a human-readable system_type string, call the matching
 * per-family init routine, and install restart/power-off hooks.
 */
void __init plat_mem_setup(void)
{
	int cputype;

	set_io_port_base(SNI_PORT_BASE);
//	ioport_resource.end = sni_io_resource.end;

	/*
	 * Setup (E)ISA I/O memory access stuff
	 */
#ifdef CONFIG_EISA
	EISA_bus = 1;
#endif

	sni_brd_type = *(unsigned char *)SNI_IDPROM_BRDTYPE;
	cputype = *(unsigned char *)SNI_IDPROM_CPUTYPE;
	switch (sni_brd_type) {
	case SNI_BRD_TOWER_OASIC:
		switch (cputype) {
		case SNI_CPU_M8030:
			system_type = "RM400-330";
			break;
		case SNI_CPU_M8031:
			system_type = "RM400-430";
			break;
		case SNI_CPU_M8037:
			system_type = "RM400-530";
			break;
		case SNI_CPU_M8034:
			system_type = "RM400-730";
			break;
		default:
			system_type = "RM400-xxx";
			break;
		}
		break;
	case SNI_BRD_MINITOWER:
		switch (cputype) {
		case SNI_CPU_M8021:
		case SNI_CPU_M8043:
			system_type = "RM400-120";
			break;
		case SNI_CPU_M8040:
			system_type = "RM400-220";
			break;
		case SNI_CPU_M8053:
			system_type = "RM400-225";
			break;
		case SNI_CPU_M8050:
			system_type = "RM400-420";
			break;
		default:
			system_type = "RM400-xxx";
			break;
		}
		break;
	case SNI_BRD_PCI_TOWER:
		system_type = "RM400-Cxx";
		break;
	case SNI_BRD_RM200:
		system_type = "RM200-xxx";
		break;
	case SNI_BRD_PCI_MTOWER:
		system_type = "RM300-Cxx";
		break;
	case SNI_BRD_PCI_DESKTOP:
		/* Desktops are told apart by CPU revision, not ID PROM */
		switch (read_c0_prid() & 0xff00) {
		case PRID_IMP_R4600:
		case PRID_IMP_R4700:
			system_type = "RM200-C20";
			break;
		case PRID_IMP_R5000:
			system_type = "RM200-C40";
			break;
		default:
			system_type = "RM200-Cxx";
			break;
		}
		break;
	case SNI_BRD_PCI_TOWER_CPLUS:
		system_type = "RM400-Exx";
		break;
	case SNI_BRD_PCI_MTOWER_CPLUS:
		system_type = "RM300-Exx";
		break;
	}
	pr_debug("Found SNI brdtype %02x name %s\n", sni_brd_type, system_type);

#ifdef DEBUG
	sni_idprom_dump();
#endif

	switch (sni_brd_type) {
	case SNI_BRD_10:
	case SNI_BRD_10NEW:
	case SNI_BRD_TOWER_OASIC:
	case SNI_BRD_MINITOWER:
		sni_a20r_init();
		break;

	case SNI_BRD_PCI_TOWER:
	case SNI_BRD_PCI_TOWER_CPLUS:
		sni_pcit_init();
		break;

	case SNI_BRD_RM200:
		sni_rm200_init();
		break;

	case SNI_BRD_PCI_MTOWER:
	case SNI_BRD_PCI_DESKTOP:
	case SNI_BRD_PCI_MTOWER_CPLUS:
		sni_pcimt_init();
		break;
	}

	_machine_restart = sni_machine_restart;
	pm_power_off = sni_machine_power_off;

	sni_display_setup();
	sni_console_setup();
}

#ifdef CONFIG_PCI

#include <linux/pci.h>
#include <video/vga.h>
#include <video/cirrus.h>

static void quirk_cirrus_ram_size(struct pci_dev *dev)
{
	u16 cmd;

	/*
	 * firmware doesn't set the ram size correct, so we
	 * need to do it here, otherwise we get screen corruption
	 * on older Cirrus chips
	 */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if ((cmd & (PCI_COMMAND_IO|PCI_COMMAND_MEMORY))
		== (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
		vga_wseq(NULL, CL_SEQR6, 0x12);	/* unlock all extension registers */
		vga_wseq(NULL, CL_SEQRF, 0x18);
	}
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5434_8,
			quirk_cirrus_ram_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5436,
			quirk_cirrus_ram_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
			quirk_cirrus_ram_size);
#endif
gpl-2.0
andr00ib/3.0.94-victor-kernel
arch/tile/lib/atomic_32.c
2324
9702
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/futex.h>
#include <arch/chip.h>

/* See <asm/atomic_32.h> */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/*
 * A block of memory containing locks for atomic ops. Each instance of this
 * struct will be homed on a different CPU.
 */
struct atomic_locks_on_cpu {
	int lock[ATOMIC_HASH_L2_SIZE];
} __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4)));

static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);

/* The locks we'll use until __init_atomic_per_cpu is called. */
static struct atomic_locks_on_cpu __initdata initial_atomic_locks;

/* Hash into this vector to get a pointer to lock for the given atomic. */
struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
  __write_once = {
	[0 ... ATOMIC_HASH_L1_SIZE-1] (&initial_atomic_locks)
};

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Map an arbitrary atomic's address to the spinlock word guarding it.
 * The hashing here must stay in lock-step with the assembly fast path.
 */
static inline int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	unsigned long i =
		(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
	unsigned long n = __insn_crc32_32(0, i);

	/* Grab high bits for L1 index. */
	unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
	/* Grab low bits for L2 index. */
	unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);

	return &atomic_lock_ptr[l1_index]->lock[l2_index];
#else
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
#endif
}

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	int i;
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {

		if (p >= &atomic_lock_ptr[i]->lock[0] &&
		    p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
			return 1;
		}
	}
	return 0;
#else
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
#endif
}

/* Called on a fault while a hashed lock is held; sanity-check and drop it. */
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

int _atomic_xchg(atomic_t *v, int n)
{
	return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(atomic_t *v, int i)
{
	return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
		.val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);

u64 _atomic64_xchg(atomic64_t *v, u64 n)
{
	return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
{
	return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
					  u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

static inline int *__futex_setup(int __user *v)
{
	/*
	 * Issue a prefetch to the counter to bring it into cache.
	 * As for __atomic_setup, but we can't do a read into the L1
	 * since it might fault; instead we do a prefetch into the L2.
	 */
	__insn_prefetch(v);
	return __atomic_hashed_lock((int __force *)v);
}

struct __get_user futex_set(u32 __user *v, int i)
{
	return __atomic_xchg((int __force *)v, __futex_setup(v), i);
}

struct __get_user futex_add(u32 __user *v, int n)
{
	return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_or(u32 __user *v, int n)
{
	return __atomic_or((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_andn(u32 __user *v, int n)
{
	return __atomic_andn((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_xor(u32 __user *v, int n)
{
	return __atomic_xor((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
{
	return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
}

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}

#if CHIP_HAS_CBOX_HOME_MAP()
static int __init noatomichash(char *str)
{
	pr_warning("noatomichash is deprecated.\n");
	return 1;
}
__setup("noatomichash", noatomichash);
#endif

/*
 * Replace the single boot-time lock set with per-cpu lock blocks (table
 * variant) or validate the hashing assumptions (page variant).
 */
void __init __init_atomic_per_cpu(void)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

	unsigned int i;
	int actual_cpu;

	/*
	 * Before this is called from setup, we just have one lock for
	 * all atomic objects/operations.  Here we replace the
	 * elements of atomic_lock_ptr so that they point at per_cpu
	 * integers.  This seemingly over-complex approach stems from
	 * the fact that DEFINE_PER_CPU defines an entry for each cpu
	 * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1.  But
	 * for efficient hashing of atomics to their locks we want a
	 * compile time constant power of 2 for the size of this
	 * table, so we use ATOMIC_HASH_SIZE.
	 *
	 * Here we populate atomic_lock_ptr from the per cpu
	 * atomic_lock_pool, interspersing by actual cpu so that
	 * subsequent elements are homed on consecutive cpus.
	 */

	actual_cpu = cpumask_first(cpu_possible_mask);

	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		/*
		 * Preincrement to slightly bias against using cpu 0,
		 * which has plenty of stuff homed on it already.
		 */
		actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
		if (actual_cpu >= nr_cpu_ids)
			actual_cpu = cpumask_first(cpu_possible_mask);

		atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
	}

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* The futex code makes this assumption, so we validate it here. */
	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}
gpl-2.0
jashasweejena/VibeKernel
drivers/pnp/pnpbios/core.c
2324
14222
/*
 * pnpbios -- PnP BIOS driver
 *
 * This driver provides access to Plug-'n'-Play services provided by
 * the PnP BIOS firmware, described in the following documents:
 *   Plug and Play BIOS Specification, Version 1.0A, 5 May 1994
 *   Plug and Play BIOS Clarification Paper, 6 October 1994
 *     Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corp.
 *
 * Originally (C) 1998 Christian Schmidt <schmidt@digadd.de>
 * Modifications (C) 1998 Tom Lees <tom@lpsg.demon.co.uk>
 * Minor reorganizations by David Hinds <dahinds@users.sourceforge.net>
 * Further modifications (C) 2001, 2002 by:
 *   Alan Cox <alan@redhat.com>
 *   Thomas Hood
 *   Brian Gerst <bgerst@didntduck.org>
 *
 * Ported to the PnP Layer and several additional improvements (C) 2002
 * by Adam Belay <ambx1@neo.rr.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Change Log
 *
 * Adam Belay - <ambx1@neo.rr.com> - March 16, 2003
 * rev 1.01	Only call pnp_bios_dev_node_info once
 *		Added pnpbios_print_status
 *		Added several new error messages and info messages
 *		Added pnpbios_interface_attach_device
 *		integrated core and proc init system
 *		Introduced PNPMODE flags
 *		Removed some useless includes
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pnp.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/desc.h>
#include <asm/byteorder.h>

#include "../base.h"
#include "pnpbios.h"

/*
 *
 * PnP BIOS INTERFACE
 *
 */

/* Set by pnpbios_probe_system() once a valid installation check passes. */
static union pnp_bios_install_struct *pnp_bios_install = NULL;

int pnp_bios_present(void)
{
	return (pnp_bios_install != NULL);
}

struct pnp_dev_node_info node_info;

/*
 *
 * DOCKING FUNCTIONS
 *
 */

static struct completion unload_sem;

/*
 * (Much of this belongs in a shared routine somewhere)
 *
 * Spawn /sbin/pnpbios with an ACTION/DOCK environment describing a dock
 * or undock event.  Always returns 0 once the helper memory could be
 * allocated; the helper's own exit status is not propagated.
 */
static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
{
	char *argv[3], **envp, *buf, *scratch;
	int i = 0, value;

	if (!(envp = kcalloc(20, sizeof(char *), GFP_KERNEL)))
		return -ENOMEM;
	if (!(buf = kzalloc(256, GFP_KERNEL))) {
		kfree(envp);
		return -ENOMEM;
	}

	/* FIXME: if there are actual users of this, it should be
	 * integrated into the driver core and use the usual infrastructure
	 * like sysfs and uevents */
	argv[0] = "/sbin/pnpbios";
	argv[1] = "dock";
	argv[2] = NULL;

	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";

#ifdef DEBUG
	/* hint that policy agent should enter no-stdout debug mode */
	envp[i++] = "DEBUG=kernel";
#endif
	/* extensible set of named bus-specific parameters,
	 * supporting multiple driver selection algorithms.
	 */
	scratch = buf;

	/* action:  add, remove */
	envp[i++] = scratch;
	scratch += sprintf(scratch, "ACTION=%s", dock ? "add" : "remove") + 1;

	/* Report the ident for the dock */
	envp[i++] = scratch;
	scratch += sprintf(scratch, "DOCK=%x/%x/%x",
			   info->location_id, info->serial, info->capabilities);
	envp[i] = NULL;

	value = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	kfree(buf);
	kfree(envp);
	return 0;
}

/*
 * Poll the PnP docking at regular intervals
 */
static int pnp_dock_thread(void *unused)
{
	static struct pnp_docking_station_info now;
	int docked = -1, d = 0;

	set_freezable();
	while (1) {
		int status;

		/*
		 * Poll every 2 seconds
		 */
		msleep_interruptible(2000);
		if (try_to_freeze())
			continue;

		status = pnp_bios_dock_station_info(&now);

		switch (status) {
			/*
			 * No dock to manage
			 */
		case PNP_FUNCTION_NOT_SUPPORTED:
			complete_and_exit(&unload_sem, 0);
		case PNP_SYSTEM_NOT_DOCKED:
			d = 0;
			break;
		case PNP_SUCCESS:
			d = 1;
			break;
		default:
			pnpbios_print_status("pnp_dock_thread", status);
			continue;
		}
		/* Only fire an event when the docked state changes */
		if (d != docked) {
			if (pnp_dock_event(d, &now) == 0) {
				docked = d;
#if 0
				printk(KERN_INFO
				       "PnPBIOS: Docking station %stached\n",
				       docked ? "at" : "de");
#endif
			}
		}
	}
	complete_and_exit(&unload_sem, 0);
}

/*
 * pnp_protocol .get hook: read the device node from the BIOS and fill in
 * the pnp_dev resources.
 * NOTE(review): returns -1 (not -ENOMEM) on allocation failure — kept
 * as-is; callers treat any negative value as failure.
 */
static int pnpbios_get_resources(struct pnp_dev *dev)
{
	u8 nodenum = dev->number;
	struct pnp_bios_node *node;

	if (!pnpbios_is_dynamic(dev))
		return -EPERM;

	pnp_dbg(&dev->dev, "get resources\n");
	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return -1;
	if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
		kfree(node);
		return -ENODEV;
	}
	pnpbios_read_resources_from_node(dev, node);
	dev->active = pnp_is_active(dev);
	kfree(node);
	return 0;
}

/*
 * pnp_protocol .set hook: write the dev's resources into the node and
 * hand it back to the BIOS.
 */
static int pnpbios_set_resources(struct pnp_dev *dev)
{
	u8 nodenum = dev->number;
	struct pnp_bios_node *node;
	int ret;

	if (!pnpbios_is_dynamic(dev))
		return -EPERM;

	pnp_dbg(&dev->dev, "set resources\n");
	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return -1;
	if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
		kfree(node);
		return -ENODEV;
	}
	if (pnpbios_write_resources_to_node(dev, node) < 0) {
		kfree(node);
		return -1;
	}
	ret = pnp_bios_set_dev_node(node->handle, (char)PNPMODE_DYNAMIC, node);
	kfree(node);
	if (ret > 0)
		ret = -1;
	return ret;
}

/*
 * Walk the node's resource data stream (small and large tags per the
 * PnP spec) and zero the payload of every tag, stopping at the end tag.
 */
static void pnpbios_zero_data_stream(struct pnp_bios_node *node)
{
	unsigned char *p = (char *)node->data;
	unsigned char *end = (char *)(node->data + node->size);
	unsigned int len;
	int i;

	while ((char *)p < (char *)end) {
		if (p[0] & 0x80) {	/* large tag */
			len = (p[2] << 8) | p[1];
			p += 3;
		} else {
			if (((p[0] >> 3) & 0x0f) == 0x0f)
				return;	/* end tag */
			len = p[0] & 0x07;
			p += 1;
		}
		for (i = 0; i < len; i++)
			p[i] = 0;
		p += len;
	}
	printk(KERN_ERR
	       "PnPBIOS: Resource structure did not contain an end tag.\n");
}

/*
 * pnp_protocol .disable hook: zero the node's resource stream and write
 * it back, effectively deactivating the device.
 */
static int pnpbios_disable_resources(struct pnp_dev *dev)
{
	struct pnp_bios_node *node;
	u8 nodenum = dev->number;
	int ret;

	if (dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
		return -EPERM;

	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	if (pnp_bios_get_dev_node(&nodenum, (char)PNPMODE_DYNAMIC, node)) {
		kfree(node);
		return -ENODEV;
	}
	pnpbios_zero_data_stream(node);

	ret = pnp_bios_set_dev_node(dev->number, (char)PNPMODE_DYNAMIC, node);
	kfree(node);
	if (ret > 0)
		ret = -1;
	return ret;
}

/* PnP Layer support */
struct pnp_protocol pnpbios_protocol = {
	.name = "Plug and Play BIOS",
	.get = pnpbios_get_resources,
	.set = pnpbios_set_resources,
	.disable = pnpbios_disable_resources,
};

/*
 * Register one BIOS device node with the PnP layer, deriving the device's
 * capability flags from the node flags.  Returns -1 if the node is a
 * duplicate or allocation fails, 0 on success.
 */
static int __init insert_device(struct pnp_bios_node *node)
{
	struct list_head *pos;
	struct pnp_dev *dev;
	char id[8];

	/* check if the device is already added */
	list_for_each(pos, &pnpbios_protocol.devices) {
		dev = list_entry(pos, struct pnp_dev, protocol_list);
		if (dev->number == node->handle)
			return -1;
	}

	pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id);
	dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id);
	if (!dev)
		return -1;

	pnpbios_parse_data_stream(dev, node);
	dev->active = pnp_is_active(dev);
	dev->flags = node->flags;
	if (!(dev->flags & PNPBIOS_NO_CONFIG))
		dev->capabilities |= PNP_CONFIGURABLE;
	if (!(dev->flags & PNPBIOS_NO_DISABLE) && pnpbios_is_dynamic(dev))
		dev->capabilities |= PNP_DISABLE;
	dev->capabilities |= PNP_READ;
	if (pnpbios_is_dynamic(dev))
		dev->capabilities |= PNP_WRITE;
	if (dev->flags & PNPBIOS_REMOVABLE)
		dev->capabilities |= PNP_REMOVABLE;

	/* clear out the damaged flags */
	if (!dev->active)
		pnp_init_resources(dev);

	pnp_add_device(dev);
	pnpbios_interface_attach_device(node);

	return 0;
}

/*
 * Enumerate all device nodes reported by the BIOS and register each one.
 * Node numbers must be strictly increasing; a non-increasing number
 * indicates a buggy BIOS and aborts the scan to avoid looping forever.
 */
static void __init build_devlist(void)
{
	u8 nodenum;
	unsigned int nodes_got = 0;
	unsigned int devs = 0;
	struct pnp_bios_node *node;

	node = kzalloc(node_info.max_node_size, GFP_KERNEL);
	if (!node)
		return;

	for (nodenum = 0; nodenum < 0xff;) {
		u8 thisnodenum = nodenum;
		/* eventually we will want to use PNPMODE_STATIC here but for now
		 * dynamic will help us catch buggy bioses to add to the blacklist.
		 */
		if (!pnpbios_dont_use_current_config) {
			if (pnp_bios_get_dev_node
			    (&nodenum, (char)PNPMODE_DYNAMIC, node))
				break;
		} else {
			if (pnp_bios_get_dev_node
			    (&nodenum, (char)PNPMODE_STATIC, node))
				break;
		}
		nodes_got++;
		if (insert_device(node) == 0)
			devs++;
		if (nodenum <= thisnodenum) {
			printk(KERN_ERR
			       "PnPBIOS: build_devlist: Node number 0x%x is out of sequence following node 0x%x. Aborting.\n",
			       (unsigned int)nodenum,
			       (unsigned int)thisnodenum);
			break;
		}
	}
	kfree(node);

	printk(KERN_INFO
	       "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver\n",
	       nodes_got, nodes_got != 1 ? "s" : "", devs);
}

/*
 *
 * INIT AND EXIT
 *
 */

static int pnpbios_disabled;
int pnpbios_dont_use_current_config;

/* Parse the "pnpbios=" kernel command line option (off/on/[no-]curr). */
static int __init pnpbios_setup(char *str)
{
	int invert;

	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "off", 3) == 0)
			pnpbios_disabled = 1;
		if (strncmp(str, "on", 2) == 0)
			pnpbios_disabled = 0;
		invert = (strncmp(str, "no-", 3) == 0);
		if (invert)
			str += 3;
		if (strncmp(str, "curr", 4) == 0)
			pnpbios_dont_use_current_config = invert;
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}

	return 1;
}

__setup("pnpbios=", pnpbios_setup);

/* PnP BIOS signature: "$PnP" */
#define PNP_SIGNATURE   (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24))

/*
 * Scan 0xf0000-0xffff0 on 16-byte boundaries for a valid PnP BIOS
 * installation structure (signature, length, checksum, version >= 1.0)
 * and record it in pnp_bios_install.  Returns 1 if found, 0 otherwise.
 */
static int __init pnpbios_probe_system(void)
{
	union pnp_bios_install_struct *check;
	u8 sum;
	int length, i;

	printk(KERN_INFO "PnPBIOS: Scanning system for PnP BIOS support...\n");

	/*
	 * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS
	 * structure and, if one is found, sets up the selectors and
	 * entry points
	 */
	for (check = (union pnp_bios_install_struct *)__va(0xf0000);
	     check < (union pnp_bios_install_struct *)__va(0xffff0);
	     check = (void *)check + 16) {
		if (check->fields.signature != PNP_SIGNATURE)
			continue;
		printk(KERN_INFO
		       "PnPBIOS: Found PnP BIOS installation structure at 0x%p\n",
		       check);
		length = check->fields.length;
		if (!length) {
			printk(KERN_ERR
			       "PnPBIOS: installation structure is invalid, skipping\n");
			continue;
		}
		for (sum = 0, i = 0; i < length; i++)
			sum += check->chars[i];
		if (sum) {
			printk(KERN_ERR
			       "PnPBIOS: installation structure is corrupted, skipping\n");
			continue;
		}
		if (check->fields.version < 0x10) {
			printk(KERN_WARNING
			       "PnPBIOS: PnP BIOS version %d.%d is not supported\n",
			       check->fields.version >> 4,
			       check->fields.version & 15);
			continue;
		}
		printk(KERN_INFO
		       "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x\n",
		       check->fields.version >> 4, check->fields.version & 15,
		       check->fields.pm16cseg, check->fields.pm16offset,
		       check->fields.pm16dseg);
		pnp_bios_install = check;
		return 1;
	}

	printk(KERN_INFO "PnPBIOS: PnP BIOS support was not detected.\n");
	return 0;
}

/* DMI callback: machine is known to crash in the PnP BIOS; disable it. */
static int __init exploding_pnp_bios(const struct dmi_system_id *d)
{
	printk(KERN_WARNING "%s detected. Disabling PnPBIOS\n", d->ident);
	return 0;
}

static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
	{			/* PnPBIOS GPF on boot */
	 .callback = exploding_pnp_bios,
	 .ident = "Higraded P14H",
	 .matches = {
		     DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
		     DMI_MATCH(DMI_BIOS_VERSION, "07.00T"),
		     DMI_MATCH(DMI_SYS_VENDOR, "Higraded"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "P14H"),
		     },
	 },
	{			/* PnPBIOS GPF on boot */
	 .callback = exploding_pnp_bios,
	 .ident = "ASUS P4P800",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."),
		     DMI_MATCH(DMI_BOARD_NAME, "P4P800"),
		     },
	 },
	{}
};

/*
 * Main init: bail out if disabled (command line, DMI blacklist, paravirt
 * or ACPI PNP active), locate the BIOS, register the protocol, start the
 * proc interface and enumerate devices.
 */
static int __init pnpbios_init(void)
{
	int ret;

	if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
	    paravirt_enabled()) {
		printk(KERN_INFO "PnPBIOS: Disabled\n");
		return -ENODEV;
	}
#ifdef CONFIG_PNPACPI
	if (!acpi_disabled && !pnpacpi_disabled) {
		pnpbios_disabled = 1;
		printk(KERN_INFO "PnPBIOS: Disabled by ACPI PNP\n");
		return -ENODEV;
	}
#endif				/* CONFIG_ACPI */

	/* scan the system for pnpbios support */
	if (!pnpbios_probe_system())
		return -ENODEV;

	/* make preparations for bios calls */
	pnpbios_calls_init(pnp_bios_install);

	/* read the node info */
	ret = pnp_bios_dev_node_info(&node_info);
	if (ret) {
		printk(KERN_ERR
		       "PnPBIOS: Unable to get node info.  Aborting.\n");
		return ret;
	}

	/* register with the pnp layer */
	ret = pnp_register_protocol(&pnpbios_protocol);
	if (ret) {
		printk(KERN_ERR
		       "PnPBIOS: Unable to register driver.  Aborting.\n");
		return ret;
	}

	/* start the proc interface */
	ret = pnpbios_proc_init();
	if (ret)
		printk(KERN_ERR "PnPBIOS: Failed to create proc interface.\n");

	/* scan for pnpbios devices */
	build_devlist();

	pnp_platform_devices = 1;
	return 0;
}

fs_initcall(pnpbios_init);

/* Deferred start of the docking poll thread (after full kernel init). */
static int __init pnpbios_thread_init(void)
{
	struct task_struct *task;

	if (pnpbios_disabled)
		return 0;

	init_completion(&unload_sem);
	task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
	if (IS_ERR(task))
		return PTR_ERR(task);

	return 0;
}

/* Start the kernel thread later: */
module_init(pnpbios_thread_init);

EXPORT_SYMBOL(pnpbios_protocol);
gpl-2.0
imoseyon/leanKernel-d2usc-deprecated
arch/mips/mm/c-tx39.c
2580
10856
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

/* For R3000 cores with R4000 style caches */
static unsigned long icache_size, dcache_size;	/* Size in bytes */

#include <asm/r4kcache.h>

extern int r3k_have_wired_reg;	/* in r3k-tlb.c */

/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
    __asm__ __volatile__( \
    ".set push\n\t" \
    ".set noreorder\n\t" \
    "b 1f\n\t" \
    "nop\n\t" \
    "1:\n\t" \
    ".set pop" \
    )

/* TX39H-style cache flush routines. */

/*
 * Invalidate the whole icache.  The cache must be disabled (ICE# set)
 * and instruction streaming stopped before the index ops are issued;
 * interrupts stay off for the duration so the disabled window is safe.
 */
static void tx39h_flush_icache_all(void)
{
    unsigned long flags, config;

    /* disable icache (set ICE#) */
    local_irq_save(flags);
    config = read_c0_conf();
    write_c0_conf(config & ~TX39_CONF_ICE);
    TX39_STOP_STREAMING();
    blast_icache16();
    write_c0_conf(config);
    local_irq_restore(flags);
}

/* DMA writeback+invalidate for the TX39/H (writethrough) variant. */
static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
    /* Catch bad driver code */
    BUG_ON(size == 0);

    iob();
    blast_inv_dcache_range(addr, addr + size);
}

/* TX39H2,TX39H3 */
static inline void tx39_blast_dcache_page(unsigned long addr)
{
    /* TX3912 has no R4k-style dcache page blast; skip it there. */
    if (current_cpu_type() != CPU_TX3912)
        blast_dcache16_page(addr);
}

static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
    blast_dcache16_page_indexed(addr);
}

static inline void tx39_blast_dcache(void)
{
    blast_dcache16();
}

/* Invalidate one icache page by address, with icache disabled. */
static inline void tx39_blast_icache_page(unsigned long addr)
{
    unsigned long flags, config;
    /* disable icache (set ICE#) */
    local_irq_save(flags);
    config = read_c0_conf();
    write_c0_conf(config & ~TX39_CONF_ICE);
    TX39_STOP_STREAMING();
    blast_icache16_page(addr);
    write_c0_conf(config);
    local_irq_restore(flags);
}

/* Invalidate one icache page by index, with icache disabled. */
static inline void tx39_blast_icache_page_indexed(unsigned long addr)
{
    unsigned long flags, config;
    /* disable icache (set ICE#) */
    local_irq_save(flags);
    config = read_c0_conf();
    write_c0_conf(config & ~TX39_CONF_ICE);
    TX39_STOP_STREAMING();
    blast_icache16_page_indexed(addr);
    write_c0_conf(config);
    local_irq_restore(flags);
}

/* Invalidate the entire icache, with icache disabled. */
static inline void tx39_blast_icache(void)
{
    unsigned long flags, config;
    /* disable icache (set ICE#) */
    local_irq_save(flags);
    config = read_c0_conf();
    write_c0_conf(config & ~TX39_CONF_ICE);
    TX39_STOP_STREAMING();
    blast_icache16();
    write_c0_conf(config);
    local_irq_restore(flags);
}

static void tx39__flush_cache_vmap(void)
{
    tx39_blast_dcache();
}

static void tx39__flush_cache_vunmap(void)
{
    tx39_blast_dcache();
}

/* Full cache flush; only needed when the dcache can alias. */
static inline void tx39_flush_cache_all(void)
{
    if (!cpu_has_dc_aliases)
        return;

    tx39_blast_dcache();
}

static inline void tx39___flush_cache_all(void)
{
    tx39_blast_dcache();
    tx39_blast_icache();
}

/* Flush caches for an mm, but only if it currently owns an ASID. */
static void tx39_flush_cache_mm(struct mm_struct *mm)
{
    if (!cpu_has_dc_aliases)
        return;

    if (cpu_context(smp_processor_id(), mm) != 0)
        tx39_blast_dcache();
}

static void tx39_flush_cache_range(struct vm_area_struct *vma,
    unsigned long start, unsigned long end)
{
    if (!cpu_has_dc_aliases)
        return;
    if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
        return;

    tx39_blast_dcache();
}

/*
 * Flush a single page for a VMA.  Walks the page tables to confirm the
 * page is actually present before touching the caches, and uses
 * indexed flushes when the page belongs to a non-current address space
 * (see the comment below about TLB translation during cache ops).
 */
static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
{
    int exec = vma->vm_flags & VM_EXEC;
    struct mm_struct *mm = vma->vm_mm;
    pgd_t *pgdp;
    pud_t *pudp;
    pmd_t *pmdp;
    pte_t *ptep;

    /*
     * If ownes no valid ASID yet, cannot possibly have gotten
     * this page into the cache.
     */
    if (cpu_context(smp_processor_id(), mm) == 0)
        return;

    page &= PAGE_MASK;
    pgdp = pgd_offset(mm, page);
    pudp = pud_offset(pgdp, page);
    pmdp = pmd_offset(pudp, page);
    ptep = pte_offset(pmdp, page);

    /*
     * If the page isn't marked valid, the page cannot possibly be
     * in the cache.
     */
    if (!(pte_val(*ptep) & _PAGE_PRESENT))
        return;

    /*
     * Doing flushes for another ASID than the current one is
     * too difficult since stupid R4k caches do a TLB translation
     * for every cache flush operation.  So we do indexed flushes
     * in that case, which doesn't overly flush the cache too much.
     */
    if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
        if (cpu_has_dc_aliases || exec)
            tx39_blast_dcache_page(page);
        if (exec)
            tx39_blast_icache_page(page);

        return;
    }

    /*
     * Do indexed flush, too much work to get the (possible) TLB refills
     * to work correctly.
     */
    if (cpu_has_dc_aliases || exec)
        tx39_blast_dcache_page_indexed(page);
    if (exec)
        tx39_blast_icache_page_indexed(page);
}

static void local_tx39_flush_data_cache_page(void * addr)
{
    tx39_blast_dcache_page((unsigned long)addr);
}

static void tx39_flush_data_cache_page(unsigned long addr)
{
    tx39_blast_dcache_page(addr);
}

/*
 * Make a range of freshly-written instructions visible: write the
 * dcache back first, then invalidate the icache over the same range.
 * Whole-cache blasts are cheaper once the range exceeds the cache size.
 */
static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
    if (end - start > dcache_size)
        tx39_blast_dcache();
    else
        protected_blast_dcache_range(start, end);

    if (end - start > icache_size)
        tx39_blast_icache();
    else {
        unsigned long flags, config;
        /* disable icache (set ICE#) */
        local_irq_save(flags);
        config = read_c0_conf();
        write_c0_conf(config & ~TX39_CONF_ICE);
        TX39_STOP_STREAMING();
        protected_blast_icache_range(start, end);
        write_c0_conf(config);
        local_irq_restore(flags);
    }
}

/* DMA writeback+invalidate; page-aligned ranges go page by page. */
static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
    unsigned long end;

    if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
        end = addr + size;
        do {
            tx39_blast_dcache_page(addr);
            addr += PAGE_SIZE;
        } while(addr != end);
    } else if (size > dcache_size) {
        tx39_blast_dcache();
    } else {
        blast_dcache_range(addr, addr + size);
    }
}

/* DMA invalidate-only variant of the above. */
static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
{
    unsigned long end;

    if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
        end = addr + size;
        do {
            tx39_blast_dcache_page(addr);
            addr += PAGE_SIZE;
        } while(addr != end);
    } else if (size > dcache_size) {
        tx39_blast_dcache();
    } else {
        blast_inv_dcache_range(addr, addr + size);
    }
}

/*
 * Flush the cache lines covering a signal trampoline: write back the
 * dcache line, then invalidate the icache line with icache disabled.
 */
static void tx39_flush_cache_sigtramp(unsigned long addr)
{
    unsigned long ic_lsize = current_cpu_data.icache.linesz;
    unsigned long dc_lsize = current_cpu_data.dcache.linesz;
    unsigned long config;
    unsigned long flags;

    protected_writeback_dcache_line(addr & ~(dc_lsize - 1));

    /* disable icache (set ICE#) */
    local_irq_save(flags);
    config = read_c0_conf();
    write_c0_conf(config & ~TX39_CONF_ICE);
    TX39_STOP_STREAMING();
    protected_flush_icache_line(addr & ~(ic_lsize - 1));
    write_c0_conf(config);
    local_irq_restore(flags);
}

/*
 * Read cache geometry out of the CP0 config register and fill in
 * current_cpu_data.  Line size and associativity are fixed per CPU
 * model; sizes are encoded as a power-of-two shift in the config word.
 */
static __init void tx39_probe_cache(void)
{
    unsigned long config;

    config = read_c0_conf();

    icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
                  TX39_CONF_ICS_SHIFT));
    dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
                  TX39_CONF_DCS_SHIFT));

    current_cpu_data.icache.linesz = 16;
    switch (current_cpu_type()) {
    case CPU_TX3912:
        current_cpu_data.icache.ways = 1;
        current_cpu_data.dcache.ways = 1;
        current_cpu_data.dcache.linesz = 4;
        break;

    case CPU_TX3927:
        current_cpu_data.icache.ways = 2;
        current_cpu_data.dcache.ways = 2;
        current_cpu_data.dcache.linesz = 16;
        break;

    case CPU_TX3922:
    default:
        current_cpu_data.icache.ways = 1;
        current_cpu_data.dcache.ways = 1;
        current_cpu_data.dcache.linesz = 16;
        break;
    }
}

/*
 * Wire up all the cache-maintenance function pointers for the running
 * TX39 variant and print the detected cache configuration.
 */
void __cpuinit tx39_cache_init(void)
{
    extern void build_clear_page(void);
    extern void build_copy_page(void);
    unsigned long config;

    config = read_c0_conf();
    config &= ~TX39_CONF_WBON;
    write_c0_conf(config);

    tx39_probe_cache();

    switch (current_cpu_type()) {
    case CPU_TX3912:
        /* TX39/H core (writethru direct-map cache) */
        __flush_cache_vmap	= tx39__flush_cache_vmap;
        __flush_cache_vunmap	= tx39__flush_cache_vunmap;
        flush_cache_all	= tx39h_flush_icache_all;
        __flush_cache_all	= tx39h_flush_icache_all;
        flush_cache_mm		= (void *) tx39h_flush_icache_all;
        flush_cache_range	= (void *) tx39h_flush_icache_all;
        flush_cache_page	= (void *) tx39h_flush_icache_all;
        flush_icache_range	= (void *) tx39h_flush_icache_all;
        local_flush_icache_range = (void *) tx39h_flush_icache_all;

        flush_cache_sigtramp	= (void *) tx39h_flush_icache_all;
        local_flush_data_cache_page	= (void *) tx39h_flush_icache_all;
        flush_data_cache_page	= (void *) tx39h_flush_icache_all;

        _dma_cache_wback_inv	= tx39h_dma_cache_wback_inv;

        shm_align_mask		= PAGE_SIZE - 1;

        break;

    case CPU_TX3922:
    case CPU_TX3927:
    default:
        /* TX39/H2,H3 core (writeback 2way-set-associative cache) */
        r3k_have_wired_reg = 1;
        write_c0_wired(0);	/* set 8 on reset... */
        /* board-dependent init code may set WBON */

        __flush_cache_vmap	= tx39__flush_cache_vmap;
        __flush_cache_vunmap	= tx39__flush_cache_vunmap;

        flush_cache_all = tx39_flush_cache_all;
        __flush_cache_all = tx39___flush_cache_all;
        flush_cache_mm = tx39_flush_cache_mm;
        flush_cache_range = tx39_flush_cache_range;
        flush_cache_page = tx39_flush_cache_page;
        flush_icache_range = tx39_flush_icache_range;
        local_flush_icache_range = tx39_flush_icache_range;

        flush_cache_sigtramp = tx39_flush_cache_sigtramp;
        local_flush_data_cache_page = local_tx39_flush_data_cache_page;
        flush_data_cache_page = tx39_flush_data_cache_page;

        _dma_cache_wback_inv = tx39_dma_cache_wback_inv;
        _dma_cache_wback = tx39_dma_cache_wback_inv;
        _dma_cache_inv = tx39_dma_cache_inv;

        shm_align_mask = max_t(unsigned long,
                       (dcache_size / current_cpu_data.dcache.ways) - 1,
                       PAGE_SIZE - 1);

        break;
    }

    current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
    current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;

    current_cpu_data.icache.sets =
        current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
    current_cpu_data.dcache.sets =
        current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;

    if (current_cpu_data.dcache.waysize > PAGE_SIZE)
        current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

    current_cpu_data.icache.waybit = 0;
    current_cpu_data.dcache.waybit = 0;

    printk("Primary instruction cache %ldkB, linesize %d bytes\n",
        icache_size >> 10, current_cpu_data.icache.linesz);
    printk("Primary data cache %ldkB, linesize %d bytes\n",
        dcache_size >> 10, current_cpu_data.dcache.linesz);

    build_clear_page();
    build_copy_page();
    tx39h_flush_icache_all();
}
gpl-2.0
bsmitty83/NeWsEnSe
drivers/parport/parport_sunbpp.c
3092
10446
/* parport_sunbpp.c: Parallel-port routines for SBUS * * Author: Derrick J. Brashear <shadow@dementia.org> * * based on work by: * Phil Blundell <philb@gnu.org> * Tim Waugh <tim@cyberelk.demon.co.uk> * Jose Renau <renau@acm.org> * David Campbell <campbell@tirian.che.curtin.edu.au> * Grant Guenther <grant@torque.net> * Eddie C. Dost <ecd@skynet.be> * Stephen Williams (steve@icarus.com) * Gus Baldauf (gbaldauf@ix.netcom.com) * Peter Zaitcev * Tom Dyas * * Updated to new SBUS device framework: David S. Miller <davem@davemloft.net> * */ #include <linux/string.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/parport.h> #include <asm/ptrace.h> #include <linux/interrupt.h> #include <asm/io.h> #include <asm/oplib.h> /* OpenProm Library */ #include <asm/dma.h> /* BPP uses LSI 64854 for DMA */ #include <asm/irq.h> #include <asm/sunbpp.h> #undef __SUNBPP_DEBUG #ifdef __SUNBPP_DEBUG #define dprintk(x) printk x #else #define dprintk(x) #endif static void parport_sunbpp_disable_irq(struct parport *p) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; u32 tmp; tmp = sbus_readl(&regs->p_csr); tmp &= ~DMA_INT_ENAB; sbus_writel(tmp, &regs->p_csr); } static void parport_sunbpp_enable_irq(struct parport *p) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; u32 tmp; tmp = sbus_readl(&regs->p_csr); tmp |= DMA_INT_ENAB; sbus_writel(tmp, &regs->p_csr); } static void parport_sunbpp_write_data(struct parport *p, unsigned char d) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; sbus_writeb(d, &regs->p_dr); dprintk((KERN_DEBUG "wrote 0x%x\n", d)); } static unsigned char parport_sunbpp_read_data(struct parport *p) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; return sbus_readb(&regs->p_dr); } #if 0 static void 
control_pc_to_sunbpp(struct parport *p, unsigned char status) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; unsigned char value_tcr = sbus_readb(&regs->p_tcr); unsigned char value_or = sbus_readb(&regs->p_or); if (status & PARPORT_CONTROL_STROBE) value_tcr |= P_TCR_DS; if (status & PARPORT_CONTROL_AUTOFD) value_or |= P_OR_AFXN; if (status & PARPORT_CONTROL_INIT) value_or |= P_OR_INIT; if (status & PARPORT_CONTROL_SELECT) value_or |= P_OR_SLCT_IN; sbus_writeb(value_or, &regs->p_or); sbus_writeb(value_tcr, &regs->p_tcr); } #endif static unsigned char status_sunbpp_to_pc(struct parport *p) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; unsigned char bits = 0; unsigned char value_tcr = sbus_readb(&regs->p_tcr); unsigned char value_ir = sbus_readb(&regs->p_ir); if (!(value_ir & P_IR_ERR)) bits |= PARPORT_STATUS_ERROR; if (!(value_ir & P_IR_SLCT)) bits |= PARPORT_STATUS_SELECT; if (!(value_ir & P_IR_PE)) bits |= PARPORT_STATUS_PAPEROUT; if (value_tcr & P_TCR_ACK) bits |= PARPORT_STATUS_ACK; if (!(value_tcr & P_TCR_BUSY)) bits |= PARPORT_STATUS_BUSY; dprintk((KERN_DEBUG "tcr 0x%x ir 0x%x\n", value_tcr, value_ir)); dprintk((KERN_DEBUG "read status 0x%x\n", bits)); return bits; } static unsigned char control_sunbpp_to_pc(struct parport *p) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; unsigned char bits = 0; unsigned char value_tcr = sbus_readb(&regs->p_tcr); unsigned char value_or = sbus_readb(&regs->p_or); if (!(value_tcr & P_TCR_DS)) bits |= PARPORT_CONTROL_STROBE; if (!(value_or & P_OR_AFXN)) bits |= PARPORT_CONTROL_AUTOFD; if (!(value_or & P_OR_INIT)) bits |= PARPORT_CONTROL_INIT; if (value_or & P_OR_SLCT_IN) bits |= PARPORT_CONTROL_SELECT; dprintk((KERN_DEBUG "tcr 0x%x or 0x%x\n", value_tcr, value_or)); dprintk((KERN_DEBUG "read control 0x%x\n", bits)); return bits; } static unsigned char parport_sunbpp_read_control(struct parport *p) { return control_sunbpp_to_pc(p); } static unsigned char 
parport_sunbpp_frob_control(struct parport *p, unsigned char mask, unsigned char val) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; unsigned char value_tcr = sbus_readb(&regs->p_tcr); unsigned char value_or = sbus_readb(&regs->p_or); dprintk((KERN_DEBUG "frob1: tcr 0x%x or 0x%x\n", value_tcr, value_or)); if (mask & PARPORT_CONTROL_STROBE) { if (val & PARPORT_CONTROL_STROBE) { value_tcr &= ~P_TCR_DS; } else { value_tcr |= P_TCR_DS; } } if (mask & PARPORT_CONTROL_AUTOFD) { if (val & PARPORT_CONTROL_AUTOFD) { value_or &= ~P_OR_AFXN; } else { value_or |= P_OR_AFXN; } } if (mask & PARPORT_CONTROL_INIT) { if (val & PARPORT_CONTROL_INIT) { value_or &= ~P_OR_INIT; } else { value_or |= P_OR_INIT; } } if (mask & PARPORT_CONTROL_SELECT) { if (val & PARPORT_CONTROL_SELECT) { value_or |= P_OR_SLCT_IN; } else { value_or &= ~P_OR_SLCT_IN; } } sbus_writeb(value_or, &regs->p_or); sbus_writeb(value_tcr, &regs->p_tcr); dprintk((KERN_DEBUG "frob2: tcr 0x%x or 0x%x\n", value_tcr, value_or)); return parport_sunbpp_read_control(p); } static void parport_sunbpp_write_control(struct parport *p, unsigned char d) { const unsigned char wm = (PARPORT_CONTROL_STROBE | PARPORT_CONTROL_AUTOFD | PARPORT_CONTROL_INIT | PARPORT_CONTROL_SELECT); parport_sunbpp_frob_control (p, wm, d & wm); } static unsigned char parport_sunbpp_read_status(struct parport *p) { return status_sunbpp_to_pc(p); } static void parport_sunbpp_data_forward (struct parport *p) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; unsigned char value_tcr = sbus_readb(&regs->p_tcr); dprintk((KERN_DEBUG "forward\n")); value_tcr &= ~P_TCR_DIR; sbus_writeb(value_tcr, &regs->p_tcr); } static void parport_sunbpp_data_reverse (struct parport *p) { struct bpp_regs __iomem *regs = (struct bpp_regs __iomem *)p->base; u8 val = sbus_readb(&regs->p_tcr); dprintk((KERN_DEBUG "reverse\n")); val |= P_TCR_DIR; sbus_writeb(val, &regs->p_tcr); } static void parport_sunbpp_init_state(struct pardevice *dev, 
struct parport_state *s) { s->u.pc.ctr = 0xc; s->u.pc.ecr = 0x0; } static void parport_sunbpp_save_state(struct parport *p, struct parport_state *s) { s->u.pc.ctr = parport_sunbpp_read_control(p); } static void parport_sunbpp_restore_state(struct parport *p, struct parport_state *s) { parport_sunbpp_write_control(p, s->u.pc.ctr); } static struct parport_operations parport_sunbpp_ops = { .write_data = parport_sunbpp_write_data, .read_data = parport_sunbpp_read_data, .write_control = parport_sunbpp_write_control, .read_control = parport_sunbpp_read_control, .frob_control = parport_sunbpp_frob_control, .read_status = parport_sunbpp_read_status, .enable_irq = parport_sunbpp_enable_irq, .disable_irq = parport_sunbpp_disable_irq, .data_forward = parport_sunbpp_data_forward, .data_reverse = parport_sunbpp_data_reverse, .init_state = parport_sunbpp_init_state, .save_state = parport_sunbpp_save_state, .restore_state = parport_sunbpp_restore_state, .epp_write_data = parport_ieee1284_epp_write_data, .epp_read_data = parport_ieee1284_epp_read_data, .epp_write_addr = parport_ieee1284_epp_write_addr, .epp_read_addr = parport_ieee1284_epp_read_addr, .ecp_write_data = parport_ieee1284_ecp_write_data, .ecp_read_data = parport_ieee1284_ecp_read_data, .ecp_write_addr = parport_ieee1284_ecp_write_addr, .compat_write_data = parport_ieee1284_write_compat, .nibble_read_data = parport_ieee1284_read_nibble, .byte_read_data = parport_ieee1284_read_byte, .owner = THIS_MODULE, }; static int __devinit bpp_probe(struct platform_device *op) { struct parport_operations *ops; struct bpp_regs __iomem *regs; int irq, dma, err = 0, size; unsigned char value_tcr; void __iomem *base; struct parport *p; irq = op->archdata.irqs[0]; base = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), "sunbpp"); if (!base) return -ENODEV; size = resource_size(&op->resource[0]); dma = PARPORT_DMA_NONE; ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL); if (!ops) goto out_unmap; memcpy (ops, 
&parport_sunbpp_ops, sizeof(struct parport_operations)); dprintk(("register_port\n")); if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) goto out_free_ops; p->size = size; p->dev = &op->dev; if ((err = request_irq(p->irq, parport_irq_handler, IRQF_SHARED, p->name, p)) != 0) { goto out_put_port; } parport_sunbpp_enable_irq(p); regs = (struct bpp_regs __iomem *)p->base; value_tcr = sbus_readb(&regs->p_tcr); value_tcr &= ~P_TCR_DIR; sbus_writeb(value_tcr, &regs->p_tcr); printk(KERN_INFO "%s: sunbpp at 0x%lx\n", p->name, p->base); dev_set_drvdata(&op->dev, p); parport_announce_port(p); return 0; out_put_port: parport_put_port(p); out_free_ops: kfree(ops); out_unmap: of_iounmap(&op->resource[0], base, size); return err; } static int __devexit bpp_remove(struct platform_device *op) { struct parport *p = dev_get_drvdata(&op->dev); struct parport_operations *ops = p->ops; parport_remove_port(p); if (p->irq != PARPORT_IRQ_NONE) { parport_sunbpp_disable_irq(p); free_irq(p->irq, p); } of_iounmap(&op->resource[0], (void __iomem *) p->base, p->size); parport_put_port(p); kfree(ops); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id bpp_match[] = { { .name = "SUNW,bpp", }, {}, }; MODULE_DEVICE_TABLE(of, bpp_match); static struct platform_driver bpp_sbus_driver = { .driver = { .name = "bpp", .owner = THIS_MODULE, .of_match_table = bpp_match, }, .probe = bpp_probe, .remove = __devexit_p(bpp_remove), }; static int __init parport_sunbpp_init(void) { return platform_driver_register(&bpp_sbus_driver); } static void __exit parport_sunbpp_exit(void) { platform_driver_unregister(&bpp_sbus_driver); } MODULE_AUTHOR("Derrick J Brashear"); MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port"); MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); module_init(parport_sunbpp_init) module_exit(parport_sunbpp_exit)
gpl-2.0
iAMr00t/android_kernel_lge_ls840
drivers/media/video/indycam.c
3348
9407
/*
 *  indycam.c - Silicon Graphics IndyCam digital camera driver
 *
 *  Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
 *  Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* IndyCam decodes stream of photons into digital image representation ;-) */
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>

#include "indycam.h"

#define INDYCAM_MODULE_VERSION "0.0.5"

MODULE_DESCRIPTION("SGI IndyCam driver");
MODULE_VERSION(INDYCAM_MODULE_VERSION);
MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
MODULE_LICENSE("GPL");


// #define INDYCAM_DEBUG

#ifdef INDYCAM_DEBUG
#define dprintk(x...) printk("IndyCam: " x);
#define indycam_regdump(client) indycam_regdump_debug(client)
#else
#define dprintk(x...)
#define indycam_regdump(client)
#endif

/* Per-device state: embedded v4l2 subdev plus the camera hw revision. */
struct indycam {
    struct v4l2_subdev sd;
    u8 version;	/* hardware revision byte read from INDYCAM_REG_VERSION */
};

static inline struct indycam *to_indycam(struct v4l2_subdev *sd)
{
    return container_of(sd, struct indycam, sd);
}

/* Initial register values written to registers 0..7 at probe time. */
static const u8 initseq[] = {
    INDYCAM_CONTROL_AGCENA,		/* INDYCAM_CONTROL */
    INDYCAM_SHUTTER_60,		/* INDYCAM_SHUTTER */
    INDYCAM_GAIN_DEFAULT,		/* INDYCAM_GAIN */
    0x00,				/* INDYCAM_BRIGHTNESS (read-only) */
    INDYCAM_RED_BALANCE_DEFAULT,	/* INDYCAM_RED_BALANCE */
    INDYCAM_BLUE_BALANCE_DEFAULT,	/* INDYCAM_BLUE_BALANCE */
    INDYCAM_RED_SATURATION_DEFAULT,	/* INDYCAM_RED_SATURATION */
    INDYCAM_BLUE_SATURATION_DEFAULT,/* INDYCAM_BLUE_SATURATION */
};

/* IndyCam register handling */

/*
 * Read one camera register over SMBus.  The reset register is
 * write-only and reads back as 0.  Returns 0 on success or the
 * negative SMBus error.
 */
static int indycam_read_reg(struct v4l2_subdev *sd, u8 reg, u8 *value)
{
    struct i2c_client *client = v4l2_get_subdevdata(sd);
    int ret;

    if (reg == INDYCAM_REG_RESET) {
        dprintk("indycam_read_reg(): "
            "skipping write-only register %d\n", reg);
        *value = 0;
        return 0;
    }

    ret = i2c_smbus_read_byte_data(client, reg);

    if (ret < 0) {
        printk(KERN_ERR "IndyCam: indycam_read_reg(): read failed, "
               "register = 0x%02x\n", reg);
        return ret;
    }

    *value = (u8)ret;

    return 0;
}

/*
 * Write one camera register over SMBus.  Brightness and version
 * registers are read-only and silently skipped.  Returns 0 on success
 * or the negative SMBus error.
 */
static int indycam_write_reg(struct v4l2_subdev *sd, u8 reg, u8 value)
{
    struct i2c_client *client = v4l2_get_subdevdata(sd);
    int err;

    if (reg == INDYCAM_REG_BRIGHTNESS || reg == INDYCAM_REG_VERSION) {
        dprintk("indycam_write_reg(): "
            "skipping read-only register %d\n", reg);
        return 0;
    }

    dprintk("Writing Reg %d = 0x%02x\n", reg, value);
    err = i2c_smbus_write_byte_data(client, reg, value);

    if (err) {
        printk(KERN_ERR "IndyCam: indycam_write_reg(): write failed, "
               "register = 0x%02x, value = 0x%02x\n", reg, value);
    }
    return err;
}

/* Write @length consecutive registers starting at @reg; stops at the
 * first failing write and returns its error code. */
static int indycam_write_block(struct v4l2_subdev *sd, u8 reg,
                   u8 length, u8 *data)
{
    int i, err;

    for (i = 0; i < length; i++) {
        err = indycam_write_reg(sd, reg + i, data[i]);
        if (err)
            return err;
    }

    return 0;
}

/* Helper functions */

#ifdef INDYCAM_DEBUG
/* Dump registers 0..8 to the kernel log (debug builds only). */
static void indycam_regdump_debug(struct v4l2_subdev *sd)
{
    int i;
    u8 val;

    for (i = 0; i < 9; i++) {
        indycam_read_reg(sd, i, &val);
        dprintk("Reg %d = 0x%02x\n", i, val);
    }
}
#endif

/*
 * V4L2 get-control: map a control id to the camera register holding it
 * and return the current value.  Gamma is only a real register on the
 * "Moose" hardware revision; older cameras report the default.
 * NOTE(review): the RED/BLUE saturation cases match on
 * INDYCAM_CONTROL_* constants rather than V4L2 CIDs — presumably these
 * are driver-private control ids defined in indycam.h; confirm there.
 */
static int indycam_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
    struct indycam *camera = to_indycam(sd);
    u8 reg;
    int ret = 0;

    switch (ctrl->id) {
    case V4L2_CID_AUTOGAIN:
    case V4L2_CID_AUTO_WHITE_BALANCE:
        ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
        if (ret)
            return -EIO;
        if (ctrl->id == V4L2_CID_AUTOGAIN)
            ctrl->value = (reg & INDYCAM_CONTROL_AGCENA)
                ? 1 : 0;
        else
            ctrl->value = (reg & INDYCAM_CONTROL_AWBCTL)
                ? 1 : 0;
        break;
    case V4L2_CID_EXPOSURE:
        ret = indycam_read_reg(sd, INDYCAM_REG_SHUTTER, &reg);
        if (ret)
            return -EIO;
        /* shutter register is offset by one, with 0x00 meaning max */
        ctrl->value = ((s32)reg == 0x00) ? 0xff : ((s32)reg - 1);
        break;
    case V4L2_CID_GAIN:
        ret = indycam_read_reg(sd, INDYCAM_REG_GAIN, &reg);
        if (ret)
            return -EIO;
        ctrl->value = (s32)reg;
        break;
    case V4L2_CID_RED_BALANCE:
        ret = indycam_read_reg(sd, INDYCAM_REG_RED_BALANCE, &reg);
        if (ret)
            return -EIO;
        ctrl->value = (s32)reg;
        break;
    case V4L2_CID_BLUE_BALANCE:
        ret = indycam_read_reg(sd, INDYCAM_REG_BLUE_BALANCE, &reg);
        if (ret)
            return -EIO;
        ctrl->value = (s32)reg;
        break;
    case INDYCAM_CONTROL_RED_SATURATION:
        ret = indycam_read_reg(sd, INDYCAM_REG_RED_SATURATION, &reg);
        if (ret)
            return -EIO;
        ctrl->value = (s32)reg;
        break;
    case INDYCAM_CONTROL_BLUE_SATURATION:
        ret = indycam_read_reg(sd, INDYCAM_REG_BLUE_SATURATION, &reg);
        if (ret)
            return -EIO;
        ctrl->value = (s32)reg;
        break;
    case V4L2_CID_GAMMA:
        if (camera->version == CAMERA_VERSION_MOOSE) {
            ret = indycam_read_reg(sd, INDYCAM_REG_GAMMA, &reg);
            if (ret)
                return -EIO;
            ctrl->value = (s32)reg;
        } else {
            ctrl->value = INDYCAM_GAMMA_DEFAULT;
        }
        break;
    default:
        ret = -EINVAL;
    }

    return ret;
}

/*
 * V4L2 set-control: inverse of indycam_g_ctrl().  Auto-gain and auto
 * white balance share the control register, so it is read-modified-
 * written; gamma writes are silently ignored on pre-Moose hardware.
 */
static int indycam_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
    struct indycam *camera = to_indycam(sd);
    u8 reg;
    int ret = 0;

    switch (ctrl->id) {
    case V4L2_CID_AUTOGAIN:
    case V4L2_CID_AUTO_WHITE_BALANCE:
        ret = indycam_read_reg(sd, INDYCAM_REG_CONTROL, &reg);
        if (ret)
            break;

        if (ctrl->id == V4L2_CID_AUTOGAIN) {
            if (ctrl->value)
                reg |= INDYCAM_CONTROL_AGCENA;
            else
                reg &= ~INDYCAM_CONTROL_AGCENA;
        } else {
            if (ctrl->value)
                reg |= INDYCAM_CONTROL_AWBCTL;
            else
                reg &= ~INDYCAM_CONTROL_AWBCTL;
        }

        ret = indycam_write_reg(sd, INDYCAM_REG_CONTROL, reg);
        break;
    case V4L2_CID_EXPOSURE:
        /* inverse of the g_ctrl mapping: 0xff -> 0x00, else value+1 */
        reg = (ctrl->value == 0xff) ? 0x00 : (ctrl->value + 1);
        ret = indycam_write_reg(sd, INDYCAM_REG_SHUTTER, reg);
        break;
    case V4L2_CID_GAIN:
        ret = indycam_write_reg(sd, INDYCAM_REG_GAIN, ctrl->value);
        break;
    case V4L2_CID_RED_BALANCE:
        ret = indycam_write_reg(sd, INDYCAM_REG_RED_BALANCE,
                    ctrl->value);
        break;
    case V4L2_CID_BLUE_BALANCE:
        ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_BALANCE,
                    ctrl->value);
        break;
    case INDYCAM_CONTROL_RED_SATURATION:
        ret = indycam_write_reg(sd, INDYCAM_REG_RED_SATURATION,
                    ctrl->value);
        break;
    case INDYCAM_CONTROL_BLUE_SATURATION:
        ret = indycam_write_reg(sd, INDYCAM_REG_BLUE_SATURATION,
                    ctrl->value);
        break;
    case V4L2_CID_GAMMA:
        if (camera->version == CAMERA_VERSION_MOOSE) {
            ret = indycam_write_reg(sd, INDYCAM_REG_GAMMA,
                        ctrl->value);
        }
        break;
    default:
        ret = -EINVAL;
    }

    return ret;
}

/* I2C-interface */

static int indycam_g_chip_ident(struct v4l2_subdev *sd,
        struct v4l2_dbg_chip_ident *chip)
{
    struct i2c_client *client = v4l2_get_subdevdata(sd);
    struct indycam *camera = to_indycam(sd);

    return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_INDYCAM,
                      camera->version);
}

/* ----------------------------------------------------------------------- */

static const struct v4l2_subdev_core_ops indycam_core_ops = {
    .g_chip_ident = indycam_g_chip_ident,
    .g_ctrl = indycam_g_ctrl,
    .s_ctrl = indycam_s_ctrl,
};

static const struct v4l2_subdev_ops indycam_ops = {
    .core = &indycam_core_ops,
};

/*
 * Probe: register the subdev, verify the hardware revision, push the
 * initial register sequence and enable AGC + auto white balance.
 * Frees the private state and unwinds on any failure.
 * NOTE(review): a negative i2c_smbus_read_byte_data() error collapses
 * into the u8 version field; it is caught by the version whitelist
 * check below, so probe still fails correctly with -ENODEV.
 */
static int indycam_probe(struct i2c_client *client,
              const struct i2c_device_id *id)
{
    int err = 0;
    struct indycam *camera;
    struct v4l2_subdev *sd;

    v4l_info(client, "chip found @ 0x%x (%s)\n",
            client->addr << 1, client->adapter->name);

    camera = kzalloc(sizeof(struct indycam), GFP_KERNEL);
    if (!camera)
        return -ENOMEM;

    sd = &camera->sd;
    v4l2_i2c_subdev_init(sd, client, &indycam_ops);

    camera->version = i2c_smbus_read_byte_data(client,
                           INDYCAM_REG_VERSION);
    if (camera->version != CAMERA_VERSION_INDY &&
        camera->version != CAMERA_VERSION_MOOSE) {
        kfree(camera);
        return -ENODEV;
    }

    printk(KERN_INFO "IndyCam v%d.%d detected\n",
           INDYCAM_VERSION_MAJOR(camera->version),
           INDYCAM_VERSION_MINOR(camera->version));

    indycam_regdump(sd);

    // initialize
    err = indycam_write_block(sd, 0, sizeof(initseq), (u8 *)&initseq);
    if (err) {
        printk(KERN_ERR "IndyCam initialization failed\n");
        kfree(camera);
        return -EIO;
    }

    indycam_regdump(sd);

    // white balance
    err = indycam_write_reg(sd, INDYCAM_REG_CONTROL,
              INDYCAM_CONTROL_AGCENA | INDYCAM_CONTROL_AWBCTL);
    if (err) {
        printk(KERN_ERR "IndyCam: White balancing camera failed\n");
        kfree(camera);
        return -EIO;
    }

    indycam_regdump(sd);

    printk(KERN_INFO "IndyCam initialized\n");

    return 0;
}

static int indycam_remove(struct i2c_client *client)
{
    struct v4l2_subdev *sd = i2c_get_clientdata(client);

    v4l2_device_unregister_subdev(sd);
    kfree(to_indycam(sd));
    return 0;
}

static const struct i2c_device_id indycam_id[] = {
    { "indycam", 0 },
    { }
};
MODULE_DEVICE_TABLE(i2c, indycam_id);

static struct i2c_driver indycam_driver = {
    .driver = {
        .owner	= THIS_MODULE,
        .name	= "indycam",
    },
    .probe		= indycam_probe,
    .remove		= indycam_remove,
    .id_table	= indycam_id,
};

static __init int init_indycam(void)
{
    return i2c_add_driver(&indycam_driver);
}

static __exit void exit_indycam(void)
{
    i2c_del_driver(&indycam_driver);
}

module_init(init_indycam);
module_exit(exit_indycam);
gpl-2.0
ryrzy/shooter_u_ics_kernel_3.0.xx
arch/arm/plat-iop/adma.c
11796
5374
/*
 * platform device definitions for the iop3xx dma/xor engines
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/platform_device.h>
#include <asm/hardware/iop3xx.h>
#include <linux/dma-mapping.h>
#include <mach/adma.h>
#include <asm/hardware/iop_adma.h>

/*
 * Map the generic IRQ_DMA*/AA names used below onto the SoC-specific
 * IRQ numbers.  Exactly one of these two options is set per build.
 */
#ifdef CONFIG_ARCH_IOP32X
#define IRQ_DMA0_EOT IRQ_IOP32X_DMA0_EOT
#define IRQ_DMA0_EOC IRQ_IOP32X_DMA0_EOC
#define IRQ_DMA0_ERR IRQ_IOP32X_DMA0_ERR

#define IRQ_DMA1_EOT IRQ_IOP32X_DMA1_EOT
#define IRQ_DMA1_EOC IRQ_IOP32X_DMA1_EOC
#define IRQ_DMA1_ERR IRQ_IOP32X_DMA1_ERR

#define IRQ_AA_EOT IRQ_IOP32X_AA_EOT
#define IRQ_AA_EOC IRQ_IOP32X_AA_EOC
#define IRQ_AA_ERR IRQ_IOP32X_AA_ERR
#endif
#ifdef CONFIG_ARCH_IOP33X
#define IRQ_DMA0_EOT IRQ_IOP33X_DMA0_EOT
#define IRQ_DMA0_EOC IRQ_IOP33X_DMA0_EOC
#define IRQ_DMA0_ERR IRQ_IOP33X_DMA0_ERR

#define IRQ_DMA1_EOT IRQ_IOP33X_DMA1_EOT
#define IRQ_DMA1_EOC IRQ_IOP33X_DMA1_EOC
#define IRQ_DMA1_ERR IRQ_IOP33X_DMA1_ERR

#define IRQ_AA_EOT IRQ_IOP33X_AA_EOT
#define IRQ_AA_EOC IRQ_IOP33X_AA_EOC
#define IRQ_AA_ERR IRQ_IOP33X_AA_ERR
#endif

/* AAU and DMA Channels: one MEM region + EOT/EOC/ERR IRQs per engine */
static struct resource iop3xx_dma_0_resources[] = {
	[0] = {
		.start = IOP3XX_DMA_PHYS_BASE(0),
		.end = IOP3XX_DMA_UPPER_PA(0),
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_DMA0_EOT,
		.end = IRQ_DMA0_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_DMA0_EOC,
		.end = IRQ_DMA0_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_DMA0_ERR,
		.end = IRQ_DMA0_ERR,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop3xx_dma_1_resources[] = {
	[0] = {
		.start = IOP3XX_DMA_PHYS_BASE(1),
		.end = IOP3XX_DMA_UPPER_PA(1),
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_DMA1_EOT,
		.end = IRQ_DMA1_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_DMA1_EOC,
		.end = IRQ_DMA1_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_DMA1_ERR,
		.end = IRQ_DMA1_ERR,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop3xx_aau_resources[] = {
	[0] = {
		.start = IOP3XX_AAU_PHYS_BASE,
		.end = IOP3XX_AAU_UPPER_PA,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_AA_EOT,
		.end = IRQ_AA_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_AA_EOC,
		.end = IRQ_AA_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_AA_ERR,
		.end = IRQ_AA_ERR,
		.flags = IORESOURCE_IRQ
	}
};

/* shared streaming-DMA mask for all three engines */
static u64 iop3xx_adma_dmamask = DMA_BIT_MASK(32);

static struct iop_adma_platform_data iop3xx_dma_0_data = {
	.hw_id = DMA0_ID,
	.pool_size = PAGE_SIZE,
};

static struct iop_adma_platform_data iop3xx_dma_1_data = {
	.hw_id = DMA1_ID,
	.pool_size = PAGE_SIZE,
};

static struct iop_adma_platform_data iop3xx_aau_data = {
	.hw_id = AAU_ID,
	/* AAU descriptors are larger, so give it a bigger pool */
	.pool_size = 3 * PAGE_SIZE,
};

struct platform_device iop3xx_dma_0_channel = {
	.name = "iop-adma",
	.id = 0,
	.num_resources = 4,
	.resource = iop3xx_dma_0_resources,
	.dev = {
		.dma_mask = &iop3xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop3xx_dma_0_data,
	},
};

struct platform_device iop3xx_dma_1_channel = {
	.name = "iop-adma",
	.id = 1,
	.num_resources = 4,
	.resource = iop3xx_dma_1_resources,
	.dev = {
		.dma_mask = &iop3xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop3xx_dma_1_data,
	},
};

struct platform_device iop3xx_aau_channel = {
	.name = "iop-adma",
	.id = 2,
	.num_resources = 4,
	.resource = iop3xx_aau_resources,
	.dev = {
		.dma_mask = &iop3xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop3xx_aau_data,
	},
};

/*
 * Advertise each engine's dmaengine capabilities.
 *
 * The DMA channels offer only MEMCPY + INTERRUPT on both 32x and 33x;
 * the original #ifdef CONFIG_ARCH_IOP32X / #else pairs for DMA0 and
 * DMA1 had byte-identical branches (left over from when the
 * 32x-lacking CRC32C capability was removed), so they are collapsed
 * into unconditional code here — no behavior change.
 *
 * The AAU conditional is kept: only the 33x AAU performs zero sum
 * (DMA_XOR_VAL).
 */
static int __init iop3xx_adma_cap_init(void)
{
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);

	dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);

#ifdef CONFIG_ARCH_IOP32X
	/* the 32x AAU does not perform zero sum */
	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#else
	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#endif
	return 0;
}
arch_initcall(iop3xx_adma_cap_init);
gpl-2.0
binkybear/furnace-bacon
arch/arm/plat-iop/adma.c
11796
5374
/*
 * platform device definitions for the iop3xx dma/xor engines
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/platform_device.h>
#include <asm/hardware/iop3xx.h>
#include <linux/dma-mapping.h>
#include <mach/adma.h>
#include <asm/hardware/iop_adma.h>

/*
 * Map the generic IRQ_DMA*/AA names used below onto the SoC-specific
 * IRQ numbers.  Exactly one of these two options is set per build.
 */
#ifdef CONFIG_ARCH_IOP32X
#define IRQ_DMA0_EOT IRQ_IOP32X_DMA0_EOT
#define IRQ_DMA0_EOC IRQ_IOP32X_DMA0_EOC
#define IRQ_DMA0_ERR IRQ_IOP32X_DMA0_ERR

#define IRQ_DMA1_EOT IRQ_IOP32X_DMA1_EOT
#define IRQ_DMA1_EOC IRQ_IOP32X_DMA1_EOC
#define IRQ_DMA1_ERR IRQ_IOP32X_DMA1_ERR

#define IRQ_AA_EOT IRQ_IOP32X_AA_EOT
#define IRQ_AA_EOC IRQ_IOP32X_AA_EOC
#define IRQ_AA_ERR IRQ_IOP32X_AA_ERR
#endif
#ifdef CONFIG_ARCH_IOP33X
#define IRQ_DMA0_EOT IRQ_IOP33X_DMA0_EOT
#define IRQ_DMA0_EOC IRQ_IOP33X_DMA0_EOC
#define IRQ_DMA0_ERR IRQ_IOP33X_DMA0_ERR

#define IRQ_DMA1_EOT IRQ_IOP33X_DMA1_EOT
#define IRQ_DMA1_EOC IRQ_IOP33X_DMA1_EOC
#define IRQ_DMA1_ERR IRQ_IOP33X_DMA1_ERR

#define IRQ_AA_EOT IRQ_IOP33X_AA_EOT
#define IRQ_AA_EOC IRQ_IOP33X_AA_EOC
#define IRQ_AA_ERR IRQ_IOP33X_AA_ERR
#endif

/* AAU and DMA Channels: one MEM region + EOT/EOC/ERR IRQs per engine */
static struct resource iop3xx_dma_0_resources[] = {
	[0] = {
		.start = IOP3XX_DMA_PHYS_BASE(0),
		.end = IOP3XX_DMA_UPPER_PA(0),
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_DMA0_EOT,
		.end = IRQ_DMA0_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_DMA0_EOC,
		.end = IRQ_DMA0_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_DMA0_ERR,
		.end = IRQ_DMA0_ERR,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop3xx_dma_1_resources[] = {
	[0] = {
		.start = IOP3XX_DMA_PHYS_BASE(1),
		.end = IOP3XX_DMA_UPPER_PA(1),
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_DMA1_EOT,
		.end = IRQ_DMA1_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_DMA1_EOC,
		.end = IRQ_DMA1_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_DMA1_ERR,
		.end = IRQ_DMA1_ERR,
		.flags = IORESOURCE_IRQ
	}
};

static struct resource iop3xx_aau_resources[] = {
	[0] = {
		.start = IOP3XX_AAU_PHYS_BASE,
		.end = IOP3XX_AAU_UPPER_PA,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = IRQ_AA_EOT,
		.end = IRQ_AA_EOT,
		.flags = IORESOURCE_IRQ
	},
	[2] = {
		.start = IRQ_AA_EOC,
		.end = IRQ_AA_EOC,
		.flags = IORESOURCE_IRQ
	},
	[3] = {
		.start = IRQ_AA_ERR,
		.end = IRQ_AA_ERR,
		.flags = IORESOURCE_IRQ
	}
};

/* shared streaming-DMA mask for all three engines */
static u64 iop3xx_adma_dmamask = DMA_BIT_MASK(32);

static struct iop_adma_platform_data iop3xx_dma_0_data = {
	.hw_id = DMA0_ID,
	.pool_size = PAGE_SIZE,
};

static struct iop_adma_platform_data iop3xx_dma_1_data = {
	.hw_id = DMA1_ID,
	.pool_size = PAGE_SIZE,
};

static struct iop_adma_platform_data iop3xx_aau_data = {
	.hw_id = AAU_ID,
	/* AAU descriptors are larger, so give it a bigger pool */
	.pool_size = 3 * PAGE_SIZE,
};

struct platform_device iop3xx_dma_0_channel = {
	.name = "iop-adma",
	.id = 0,
	.num_resources = 4,
	.resource = iop3xx_dma_0_resources,
	.dev = {
		.dma_mask = &iop3xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop3xx_dma_0_data,
	},
};

struct platform_device iop3xx_dma_1_channel = {
	.name = "iop-adma",
	.id = 1,
	.num_resources = 4,
	.resource = iop3xx_dma_1_resources,
	.dev = {
		.dma_mask = &iop3xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop3xx_dma_1_data,
	},
};

struct platform_device iop3xx_aau_channel = {
	.name = "iop-adma",
	.id = 2,
	.num_resources = 4,
	.resource = iop3xx_aau_resources,
	.dev = {
		.dma_mask = &iop3xx_adma_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(64),
		.platform_data = (void *) &iop3xx_aau_data,
	},
};

/*
 * Advertise each engine's dmaengine capabilities.
 *
 * The DMA channels offer only MEMCPY + INTERRUPT on both 32x and 33x;
 * the original #ifdef CONFIG_ARCH_IOP32X / #else pairs for DMA0 and
 * DMA1 had byte-identical branches (left over from when the
 * 32x-lacking CRC32C capability was removed), so they are collapsed
 * into unconditional code here — no behavior change.
 *
 * The AAU conditional is kept: only the 33x AAU performs zero sum
 * (DMA_XOR_VAL).
 */
static int __init iop3xx_adma_cap_init(void)
{
	dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask);

	dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask);

#ifdef CONFIG_ARCH_IOP32X
	/* the 32x AAU does not perform zero sum */
	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#else
	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
#endif
	return 0;
}
arch_initcall(iop3xx_adma_cap_init);
gpl-2.0
linuxmake/kernel_softwinner_fiber
scripts/unifdef.c
12564
35639
/* * Copyright (c) 2002 - 2011 Tony Finch <dot@dotat.at> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * unifdef - remove ifdef'ed lines * * This code was derived from software contributed to Berkeley by Dave Yost. * It was rewritten to support ANSI C by Tony Finch. The original version * of unifdef carried the 4-clause BSD copyright licence. None of its code * remains in this version (though some of the names remain) so it now * carries a more liberal licence. 
* * Wishlist: * provide an option which will append the name of the * appropriate symbol after #else's and #endif's * provide an option which will check symbols after * #else's and #endif's to see that they match their * corresponding #ifdef or #ifndef * * These require better buffer handling, which would also make * it possible to handle all "dodgy" directives correctly. */ #include <sys/types.h> #include <sys/stat.h> #include <ctype.h> #include <err.h> #include <errno.h> #include <stdarg.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> const char copyright[] = "@(#) $Version: unifdef-2.5 $\n" "@(#) $Author: Tony Finch (dot@dotat.at) $\n" "@(#) $URL: http://dotat.at/prog/unifdef $\n" ; /* types of input lines: */ typedef enum { LT_TRUEI, /* a true #if with ignore flag */ LT_FALSEI, /* a false #if with ignore flag */ LT_IF, /* an unknown #if */ LT_TRUE, /* a true #if */ LT_FALSE, /* a false #if */ LT_ELIF, /* an unknown #elif */ LT_ELTRUE, /* a true #elif */ LT_ELFALSE, /* a false #elif */ LT_ELSE, /* #else */ LT_ENDIF, /* #endif */ LT_DODGY, /* flag: directive is not on one line */ LT_DODGY_LAST = LT_DODGY + LT_ENDIF, LT_PLAIN, /* ordinary line */ LT_EOF, /* end of file */ LT_ERROR, /* unevaluable #if */ LT_COUNT } Linetype; static char const * const linetype_name[] = { "TRUEI", "FALSEI", "IF", "TRUE", "FALSE", "ELIF", "ELTRUE", "ELFALSE", "ELSE", "ENDIF", "DODGY TRUEI", "DODGY FALSEI", "DODGY IF", "DODGY TRUE", "DODGY FALSE", "DODGY ELIF", "DODGY ELTRUE", "DODGY ELFALSE", "DODGY ELSE", "DODGY ENDIF", "PLAIN", "EOF", "ERROR" }; /* state of #if processing */ typedef enum { IS_OUTSIDE, IS_FALSE_PREFIX, /* false #if followed by false #elifs */ IS_TRUE_PREFIX, /* first non-false #(el)if is true */ IS_PASS_MIDDLE, /* first non-false #(el)if is unknown */ IS_FALSE_MIDDLE, /* a false #elif after a pass state */ IS_TRUE_MIDDLE, /* a true #elif after a pass state */ IS_PASS_ELSE, /* an else after a pass state */ 
IS_FALSE_ELSE, /* an else after a true state */ IS_TRUE_ELSE, /* an else after only false states */ IS_FALSE_TRAILER, /* #elifs after a true are false */ IS_COUNT } Ifstate; static char const * const ifstate_name[] = { "OUTSIDE", "FALSE_PREFIX", "TRUE_PREFIX", "PASS_MIDDLE", "FALSE_MIDDLE", "TRUE_MIDDLE", "PASS_ELSE", "FALSE_ELSE", "TRUE_ELSE", "FALSE_TRAILER" }; /* state of comment parser */ typedef enum { NO_COMMENT = false, /* outside a comment */ C_COMMENT, /* in a comment like this one */ CXX_COMMENT, /* between // and end of line */ STARTING_COMMENT, /* just after slash-backslash-newline */ FINISHING_COMMENT, /* star-backslash-newline in a C comment */ CHAR_LITERAL, /* inside '' */ STRING_LITERAL /* inside "" */ } Comment_state; static char const * const comment_name[] = { "NO", "C", "CXX", "STARTING", "FINISHING", "CHAR", "STRING" }; /* state of preprocessor line parser */ typedef enum { LS_START, /* only space and comments on this line */ LS_HASH, /* only space, comments, and a hash */ LS_DIRTY /* this line can't be a preprocessor line */ } Line_state; static char const * const linestate_name[] = { "START", "HASH", "DIRTY" }; /* * Minimum translation limits from ISO/IEC 9899:1999 5.2.4.1 */ #define MAXDEPTH 64 /* maximum #if nesting */ #define MAXLINE 4096 /* maximum length of line */ #define MAXSYMS 4096 /* maximum number of symbols */ /* * Sometimes when editing a keyword the replacement text is longer, so * we leave some space at the end of the tline buffer to accommodate this. */ #define EDITSLOP 10 /* * For temporary filenames */ #define TEMPLATE "unifdef.XXXXXX" /* * Globals. 
*/ static bool compblank; /* -B: compress blank lines */ static bool lnblank; /* -b: blank deleted lines */ static bool complement; /* -c: do the complement */ static bool debugging; /* -d: debugging reports */ static bool iocccok; /* -e: fewer IOCCC errors */ static bool strictlogic; /* -K: keep ambiguous #ifs */ static bool killconsts; /* -k: eval constant #ifs */ static bool lnnum; /* -n: add #line directives */ static bool symlist; /* -s: output symbol list */ static bool symdepth; /* -S: output symbol depth */ static bool text; /* -t: this is a text file */ static const char *symname[MAXSYMS]; /* symbol name */ static const char *value[MAXSYMS]; /* -Dsym=value */ static bool ignore[MAXSYMS]; /* -iDsym or -iUsym */ static int nsyms; /* number of symbols */ static FILE *input; /* input file pointer */ static const char *filename; /* input file name */ static int linenum; /* current line number */ static FILE *output; /* output file pointer */ static const char *ofilename; /* output file name */ static bool overwriting; /* output overwrites input */ static char tempname[FILENAME_MAX]; /* used when overwriting */ static char tline[MAXLINE+EDITSLOP];/* input buffer plus space */ static char *keyword; /* used for editing #elif's */ static const char *newline; /* input file format */ static const char newline_unix[] = "\n"; static const char newline_crlf[] = "\r\n"; static Comment_state incomment; /* comment parser state */ static Line_state linestate; /* #if line parser state */ static Ifstate ifstate[MAXDEPTH]; /* #if processor state */ static bool ignoring[MAXDEPTH]; /* ignore comments state */ static int stifline[MAXDEPTH]; /* start of current #if */ static int depth; /* current #if nesting */ static int delcount; /* count of deleted lines */ static unsigned blankcount; /* count of blank lines */ static unsigned blankmax; /* maximum recent blankcount */ static bool constexpr; /* constant #if expression */ static bool zerosyms = true; /* to format symdepth output 
*/ static bool firstsym; /* ditto */ static int exitstat; /* program exit status */ static void addsym(bool, bool, char *); static void closeout(void); static void debug(const char *, ...); static void done(void); static void error(const char *); static int findsym(const char *); static void flushline(bool); static Linetype parseline(void); static Linetype ifeval(const char **); static void ignoreoff(void); static void ignoreon(void); static void keywordedit(const char *); static void nest(void); static void process(void); static const char *skipargs(const char *); static const char *skipcomment(const char *); static const char *skipsym(const char *); static void state(Ifstate); static int strlcmp(const char *, const char *, size_t); static void unnest(void); static void usage(void); static void version(void); #define endsym(c) (!isalnum((unsigned char)c) && c != '_') /* * The main program. */ int main(int argc, char *argv[]) { int opt; while ((opt = getopt(argc, argv, "i:D:U:I:o:bBcdeKklnsStV")) != -1) switch (opt) { case 'i': /* treat stuff controlled by these symbols as text */ /* * For strict backwards-compatibility the U or D * should be immediately after the -i but it doesn't * matter much if we relax that requirement. 
*/ opt = *optarg++; if (opt == 'D') addsym(true, true, optarg); else if (opt == 'U') addsym(true, false, optarg); else usage(); break; case 'D': /* define a symbol */ addsym(false, true, optarg); break; case 'U': /* undef a symbol */ addsym(false, false, optarg); break; case 'I': /* no-op for compatibility with cpp */ break; case 'b': /* blank deleted lines instead of omitting them */ case 'l': /* backwards compatibility */ lnblank = true; break; case 'B': /* compress blank lines around removed section */ compblank = true; break; case 'c': /* treat -D as -U and vice versa */ complement = true; break; case 'd': debugging = true; break; case 'e': /* fewer errors from dodgy lines */ iocccok = true; break; case 'K': /* keep ambiguous #ifs */ strictlogic = true; break; case 'k': /* process constant #ifs */ killconsts = true; break; case 'n': /* add #line directive after deleted lines */ lnnum = true; break; case 'o': /* output to a file */ ofilename = optarg; break; case 's': /* only output list of symbols that control #ifs */ symlist = true; break; case 'S': /* list symbols with their nesting depth */ symlist = symdepth = true; break; case 't': /* don't parse C comments */ text = true; break; case 'V': /* print version */ version(); default: usage(); } argc -= optind; argv += optind; if (compblank && lnblank) errx(2, "-B and -b are mutually exclusive"); if (argc > 1) { errx(2, "can only do one file"); } else if (argc == 1 && strcmp(*argv, "-") != 0) { filename = *argv; input = fopen(filename, "rb"); if (input == NULL) err(2, "can't open %s", filename); } else { filename = "[stdin]"; input = stdin; } if (ofilename == NULL) { ofilename = "[stdout]"; output = stdout; } else { struct stat ist, ost; if (stat(ofilename, &ost) == 0 && fstat(fileno(input), &ist) == 0) overwriting = (ist.st_dev == ost.st_dev && ist.st_ino == ost.st_ino); if (overwriting) { const char *dirsep; int ofd; dirsep = strrchr(ofilename, '/'); if (dirsep != NULL) snprintf(tempname, sizeof(tempname), 
"%.*s/" TEMPLATE, (int)(dirsep - ofilename), ofilename); else snprintf(tempname, sizeof(tempname), TEMPLATE); ofd = mkstemp(tempname); if (ofd != -1) output = fdopen(ofd, "wb+"); if (output == NULL) err(2, "can't create temporary file"); fchmod(ofd, ist.st_mode & (S_IRWXU|S_IRWXG|S_IRWXO)); } else { output = fopen(ofilename, "wb"); if (output == NULL) err(2, "can't open %s", ofilename); } } process(); abort(); /* bug */ } static void version(void) { const char *c = copyright; for (;;) { while (*++c != '$') if (*c == '\0') exit(0); while (*++c != '$') putc(*c, stderr); putc('\n', stderr); } } static void usage(void) { fprintf(stderr, "usage: unifdef [-bBcdeKknsStV] [-Ipath]" " [-Dsym[=val]] [-Usym] [-iDsym[=val]] [-iUsym] ... [file]\n"); exit(2); } /* * A state transition function alters the global #if processing state * in a particular way. The table below is indexed by the current * processing state and the type of the current line. * * Nesting is handled by keeping a stack of states; some transition * functions increase or decrease the depth. They also maintain the * ignore state on a stack. In some complicated cases they have to * alter the preprocessor directive, as follows. * * When we have processed a group that starts off with a known-false * #if/#elif sequence (which has therefore been deleted) followed by a * #elif that we don't understand and therefore must keep, we edit the * latter into a #if to keep the nesting correct. We use strncpy() to * overwrite the 4 byte token "elif" with "if " without a '\0' byte. * * When we find a true #elif in a group, the following block will * always be kept and the rest of the sequence after the next #elif or * #else will be discarded. We edit the #elif into a #else and the * following directive to #endif since this has the desired behaviour. * * "Dodgy" directives are split across multiple lines, the most common * example being a multi-line comment hanging off the right of the * directive. 
We can handle them correctly only if there is no change * from printing to dropping (or vice versa) caused by that directive. * If the directive is the first of a group we have a choice between * failing with an error, or passing it through unchanged instead of * evaluating it. The latter is not the default to avoid questions from * users about unifdef unexpectedly leaving behind preprocessor directives. */ typedef void state_fn(void); /* report an error */ static void Eelif (void) { error("Inappropriate #elif"); } static void Eelse (void) { error("Inappropriate #else"); } static void Eendif(void) { error("Inappropriate #endif"); } static void Eeof (void) { error("Premature EOF"); } static void Eioccc(void) { error("Obfuscated preprocessor control line"); } /* plain line handling */ static void print (void) { flushline(true); } static void drop (void) { flushline(false); } /* output lacks group's start line */ static void Strue (void) { drop(); ignoreoff(); state(IS_TRUE_PREFIX); } static void Sfalse(void) { drop(); ignoreoff(); state(IS_FALSE_PREFIX); } static void Selse (void) { drop(); state(IS_TRUE_ELSE); } /* print/pass this block */ static void Pelif (void) { print(); ignoreoff(); state(IS_PASS_MIDDLE); } static void Pelse (void) { print(); state(IS_PASS_ELSE); } static void Pendif(void) { print(); unnest(); } /* discard this block */ static void Dfalse(void) { drop(); ignoreoff(); state(IS_FALSE_TRAILER); } static void Delif (void) { drop(); ignoreoff(); state(IS_FALSE_MIDDLE); } static void Delse (void) { drop(); state(IS_FALSE_ELSE); } static void Dendif(void) { drop(); unnest(); } /* first line of group */ static void Fdrop (void) { nest(); Dfalse(); } static void Fpass (void) { nest(); Pelif(); } static void Ftrue (void) { nest(); Strue(); } static void Ffalse(void) { nest(); Sfalse(); } /* variable pedantry for obfuscated lines */ static void Oiffy (void) { if (!iocccok) Eioccc(); Fpass(); ignoreon(); } static void Oif (void) { if (!iocccok) Eioccc(); 
Fpass(); } static void Oelif (void) { if (!iocccok) Eioccc(); Pelif(); } /* ignore comments in this block */ static void Idrop (void) { Fdrop(); ignoreon(); } static void Itrue (void) { Ftrue(); ignoreon(); } static void Ifalse(void) { Ffalse(); ignoreon(); } /* modify this line */ static void Mpass (void) { strncpy(keyword, "if ", 4); Pelif(); } static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); } static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); } static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); } static state_fn * const trans_table[IS_COUNT][LT_COUNT] = { /* IS_OUTSIDE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Eendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Eendif, print, done, abort }, /* IS_FALSE_PREFIX */ { Idrop, Idrop, Fdrop, Fdrop, Fdrop, Mpass, Strue, Sfalse,Selse, Dendif, Idrop, Idrop, Fdrop, Fdrop, Fdrop, Mpass, Eioccc,Eioccc,Eioccc,Eioccc, drop, Eeof, abort }, /* IS_TRUE_PREFIX */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Dfalse,Dfalse,Dfalse,Delse, Dendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eioccc,Eioccc,Eioccc,Eioccc,Eioccc, print, Eeof, abort }, /* IS_PASS_MIDDLE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Pelif, Mtrue, Delif, Pelse, Pendif, Oiffy, Oiffy, Fpass, Oif, Oif, Pelif, Oelif, Oelif, Pelse, Pendif, print, Eeof, abort }, /* IS_FALSE_MIDDLE */ { Idrop, Idrop, Fdrop, Fdrop, Fdrop, Pelif, Mtrue, Delif, Pelse, Pendif, Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eioccc,Eioccc,Eioccc,Eioccc,Eioccc, drop, Eeof, abort }, /* IS_TRUE_MIDDLE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Melif, Melif, Melif, Melse, Pendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eioccc,Eioccc,Eioccc,Eioccc,Pendif, print, Eeof, abort }, /* IS_PASS_ELSE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Pendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Pendif, print, Eeof, abort }, /* IS_FALSE_ELSE */ { Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eelif, Eelif, Eelif, Eelse, 
Dendif, Idrop, Idrop, Fdrop, Fdrop, Fdrop, Eelif, Eelif, Eelif, Eelse, Eioccc, drop, Eeof, abort }, /* IS_TRUE_ELSE */ { Itrue, Ifalse,Fpass, Ftrue, Ffalse,Eelif, Eelif, Eelif, Eelse, Dendif, Oiffy, Oiffy, Fpass, Oif, Oif, Eelif, Eelif, Eelif, Eelse, Eioccc, print, Eeof, abort }, /* IS_FALSE_TRAILER */ { Idrop, Idrop, Fdrop, Fdrop, Fdrop, Dfalse,Dfalse,Dfalse,Delse, Dendif, Idrop, Idrop, Fdrop, Fdrop, Fdrop, Dfalse,Dfalse,Dfalse,Delse, Eioccc, drop, Eeof, abort } /*TRUEI FALSEI IF TRUE FALSE ELIF ELTRUE ELFALSE ELSE ENDIF TRUEI FALSEI IF TRUE FALSE ELIF ELTRUE ELFALSE ELSE ENDIF (DODGY) PLAIN EOF ERROR */ }; /* * State machine utility functions */ static void ignoreoff(void) { if (depth == 0) abort(); /* bug */ ignoring[depth] = ignoring[depth-1]; } static void ignoreon(void) { ignoring[depth] = true; } static void keywordedit(const char *replacement) { snprintf(keyword, tline + sizeof(tline) - keyword, "%s%s", replacement, newline); print(); } static void nest(void) { if (depth > MAXDEPTH-1) abort(); /* bug */ if (depth == MAXDEPTH-1) error("Too many levels of nesting"); depth += 1; stifline[depth] = linenum; } static void unnest(void) { if (depth == 0) abort(); /* bug */ depth -= 1; } static void state(Ifstate is) { ifstate[depth] = is; } /* * Write a line to the output or not, according to command line options. */ static void flushline(bool keep) { if (symlist) return; if (keep ^ complement) { bool blankline = tline[strspn(tline, " \t\r\n")] == '\0'; if (blankline && compblank && blankcount != blankmax) { delcount += 1; blankcount += 1; } else { if (lnnum && delcount > 0) printf("#line %d%s", linenum, newline); fputs(tline, output); delcount = 0; blankmax = blankcount = blankline ? blankcount + 1 : 0; } } else { if (lnblank) fputs(newline, output); exitstat = 1; delcount += 1; blankcount = 0; } if (debugging) fflush(output); } /* * The driver for the state machine. 
*/ /* Driver loop: classify each input line and dispatch through the state machine; the LT_EOF table entries call done(), which exits, so the for(;;) never falls through. */ static void process(void) { /* When compressing blank lines, act as if the file is preceded by a large number of blank lines. */ blankmax = blankcount = 1000; for (;;) { Linetype lineval = parseline(); trans_table[ifstate[depth]][lineval](); debug("process line %d %s -> %s depth %d", linenum, linetype_name[lineval], ifstate_name[ifstate[depth]], depth); } } /* * Flush the output and handle errors. On write failure the temporary output file (overwrite mode) is removed so the original input is left untouched. */ static void closeout(void) { if (symdepth && !zerosyms) printf("\n"); if (fclose(output) == EOF) { warn("couldn't write to %s", ofilename); if (overwriting) { unlink(tempname); errx(2, "%s unchanged", filename); } else { exit(2); } } } /* * Clean up and exit. In overwrite mode the finished temporary file is renamed over the original; exitstat is 0 if no lines were dropped, 1 otherwise (set by flushline). */ static void done(void) { if (incomment) error("EOF in comment"); closeout(); if (overwriting && rename(tempname, ofilename) == -1) { warn("couldn't rename temporary file"); unlink(tempname); errx(2, "%s unchanged", ofilename); } exit(exitstat); } /* * Parse a line and determine its type. We keep the preprocessor line * parser state between calls in the global variable linestate, with * help from skipcomment(). 
*/ static Linetype parseline(void) { const char *cp; int cursym; int kwlen; Linetype retval; Comment_state wascomment; linenum++; if (fgets(tline, MAXLINE, input) == NULL) return (LT_EOF); if (newline == NULL) { if (strrchr(tline, '\n') == strrchr(tline, '\r') + 1) newline = newline_crlf; else newline = newline_unix; } retval = LT_PLAIN; wascomment = incomment; cp = skipcomment(tline); if (linestate == LS_START) { if (*cp == '#') { linestate = LS_HASH; firstsym = true; cp = skipcomment(cp + 1); } else if (*cp != '\0') linestate = LS_DIRTY; } if (!incomment && linestate == LS_HASH) { keyword = tline + (cp - tline); cp = skipsym(cp); kwlen = cp - keyword; /* no way can we deal with a continuation inside a keyword */ if (strncmp(cp, "\\\r\n", 3) == 0 || strncmp(cp, "\\\n", 2) == 0) Eioccc(); if (strlcmp("ifdef", keyword, kwlen) == 0 || strlcmp("ifndef", keyword, kwlen) == 0) { cp = skipcomment(cp); if ((cursym = findsym(cp)) < 0) retval = LT_IF; else { retval = (keyword[2] == 'n') ? LT_FALSE : LT_TRUE; if (value[cursym] == NULL) retval = (retval == LT_TRUE) ? LT_FALSE : LT_TRUE; if (ignore[cursym]) retval = (retval == LT_TRUE) ? 
LT_TRUEI : LT_FALSEI; } cp = skipsym(cp); } else if (strlcmp("if", keyword, kwlen) == 0) retval = ifeval(&cp); else if (strlcmp("elif", keyword, kwlen) == 0) retval = ifeval(&cp) - LT_IF + LT_ELIF; else if (strlcmp("else", keyword, kwlen) == 0) retval = LT_ELSE; else if (strlcmp("endif", keyword, kwlen) == 0) retval = LT_ENDIF; else { linestate = LS_DIRTY; retval = LT_PLAIN; } cp = skipcomment(cp); if (*cp != '\0') { linestate = LS_DIRTY; if (retval == LT_TRUE || retval == LT_FALSE || retval == LT_TRUEI || retval == LT_FALSEI) retval = LT_IF; if (retval == LT_ELTRUE || retval == LT_ELFALSE) retval = LT_ELIF; } if (retval != LT_PLAIN && (wascomment || incomment)) { retval += LT_DODGY; if (incomment) linestate = LS_DIRTY; } /* skipcomment normally changes the state, except if the last line of the file lacks a newline, or if there is too much whitespace in a directive */ if (linestate == LS_HASH) { size_t len = cp - tline; if (fgets(tline + len, MAXLINE - len, input) == NULL) { /* append the missing newline */ strcpy(tline + len, newline); cp += strlen(newline); linestate = LS_START; } else { linestate = LS_DIRTY; } } } if (linestate == LS_DIRTY) { while (*cp != '\0') cp = skipcomment(cp + 1); } debug("parser line %d state %s comment %s line", linenum, comment_name[incomment], linestate_name[linestate]); return (retval); } /* * These are the binary operators that are supported by the expression * evaluator. */ static Linetype op_strict(int *p, int v, Linetype at, Linetype bt) { if(at == LT_IF || bt == LT_IF) return (LT_IF); return (*p = v, v ? 
LT_TRUE : LT_FALSE); } /* Relational/equality operators: all delegate to op_strict's three-valued logic (a comparison involving an unknown (LT_IF) operand stays unknown). */ static Linetype op_lt(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a < b, at, bt); } static Linetype op_gt(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a > b, at, bt); } static Linetype op_le(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a <= b, at, bt); } static Linetype op_ge(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a >= b, at, bt); } static Linetype op_eq(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a == b, at, bt); } static Linetype op_ne(int *p, Linetype at, int a, Linetype bt, int b) { return op_strict(p, a != b, at, bt); } /* || and &&: unless -K (strictlogic) is given, short-circuit on a known TRUE/FALSE operand even when the other operand is unknown. */ static Linetype op_or(int *p, Linetype at, int a, Linetype bt, int b) { if (!strictlogic && (at == LT_TRUE || bt == LT_TRUE)) return (*p = 1, LT_TRUE); return op_strict(p, a || b, at, bt); } static Linetype op_and(int *p, Linetype at, int a, Linetype bt, int b) { if (!strictlogic && (at == LT_FALSE || bt == LT_FALSE)) return (*p = 0, LT_FALSE); return op_strict(p, a && b, at, bt); } /* * An evaluation function takes three arguments, as follows: (1) a pointer to * an element of the precedence table which lists the operators at the current * level of precedence; (2) a pointer to an integer which will receive the * value of the expression; and (3) a pointer to a char* that points to the * expression to be evaluated and that is updated to the end of the expression * when evaluation is complete. The function returns LT_FALSE if the value of * the expression is zero, LT_TRUE if it is non-zero, LT_IF if the expression * depends on an unknown symbol, or LT_ERROR if there is a parse failure. */ struct ops; typedef Linetype eval_fn(const struct ops *, int *, const char **); static eval_fn eval_table, eval_unary; /* * The precedence table. Expressions involving binary operators are evaluated * in a table-driven way by eval_table. 
When it evaluates a subexpression it
 * calls the inner function with its first argument pointing to the next
 * element of the table. Innermost expressions have special non-table-driven
 * handling.
 */
static const struct ops {
	eval_fn *inner;
	struct op {
		const char *str;
		Linetype (*fn)(int *, Linetype, int, Linetype, int);
	} op[5];
} eval_ops[] = {
	/* Lowest precedence first; eval_table recurses into the next row. */
	{ eval_table, { { "||", op_or } } },
	{ eval_table, { { "&&", op_and } } },
	{ eval_table, { { "==", op_eq },
			{ "!=", op_ne } } },
	/* "<=" and ">=" must precede "<" and ">" so prefix matching works. */
	{ eval_unary, { { "<=", op_le },
			{ ">=", op_ge },
			{ "<", op_lt },
			{ ">", op_gt } } }
};

/*
 * Function for evaluating the innermost parts of expressions,
 * viz. !expr (expr) number defined(symbol) symbol
 * We reset the constexpr flag in the last two cases.
 */
static Linetype
eval_unary(const struct ops *ops, int *valp, const char **cpp)
{
	const char *cp;
	char *ep;
	int sym;
	bool defparen;
	Linetype lt;

	cp = skipcomment(*cpp);
	if (*cp == '!') {
		/* Logical negation; unknown (LT_IF) stays unknown. */
		debug("eval%d !", ops - eval_ops);
		cp++;
		lt = eval_unary(ops, valp, &cp);
		if (lt == LT_ERROR)
			return (LT_ERROR);
		if (lt != LT_IF) {
			*valp = !*valp;
			lt = *valp ? LT_TRUE : LT_FALSE;
		}
	} else if (*cp == '(') {
		/* Parenthesized subexpression: restart at lowest precedence. */
		cp++;
		debug("eval%d (", ops - eval_ops);
		lt = eval_table(eval_ops, valp, &cp);
		if (lt == LT_ERROR)
			return (LT_ERROR);
		cp = skipcomment(cp);
		if (*cp++ != ')')
			return (LT_ERROR);
	} else if (isdigit((unsigned char)*cp)) {
		/* Numeric literal (strtol base 0: decimal, octal, or hex). */
		debug("eval%d number", ops - eval_ops);
		*valp = strtol(cp, &ep, 0);
		if (ep == cp)
			return (LT_ERROR);
		lt = *valp ? LT_TRUE : LT_FALSE;
		cp = skipsym(cp);
	} else if (strncmp(cp, "defined", 7) == 0 && endsym(cp[7])) {
		/* defined(SYM) or defined SYM; parenthesis is optional. */
		cp = skipcomment(cp+7);
		debug("eval%d defined", ops - eval_ops);
		if (*cp == '(') {
			cp = skipcomment(cp+1);
			defparen = true;
		} else {
			defparen = false;
		}
		sym = findsym(cp);
		if (sym < 0) {
			/* Symbol not in our table: result is unknown. */
			lt = LT_IF;
		} else {
			*valp = (value[sym] != NULL);
			lt = *valp ? LT_TRUE : LT_FALSE;
		}
		cp = skipsym(cp);
		cp = skipcomment(cp);
		if (defparen && *cp++ != ')')
			return (LT_ERROR);
		constexpr = false;
	} else if (!endsym(*cp)) {
		/* Bare symbol reference, possibly macro-like with arguments. */
		debug("eval%d symbol", ops - eval_ops);
		sym = findsym(cp);
		cp = skipsym(cp);
		if (sym < 0) {
			lt = LT_IF;
			cp = skipargs(cp);
		} else if (value[sym] == NULL) {
			/* Known-undefined symbol evaluates to 0. */
			*valp = 0;
			lt = LT_FALSE;
		} else {
			*valp = strtol(value[sym], &ep, 0);
			if (*ep != '\0' || ep == value[sym])
				return (LT_ERROR);
			lt = *valp ? LT_TRUE : LT_FALSE;
			cp = skipargs(cp);
		}
		constexpr = false;
	} else {
		debug("eval%d bad expr", ops - eval_ops);
		return (LT_ERROR);
	}

	*cpp = cp;
	debug("eval%d = %d", ops - eval_ops, *valp);
	return (lt);
}

/*
 * Table-driven evaluation of binary operators.
 * Left-associative: repeatedly fold "lhs OP rhs" at this precedence level,
 * recursing into ops+1 (the next-higher precedence row) for each operand.
 */
static Linetype
eval_table(const struct ops *ops, int *valp, const char **cpp)
{
	const struct op *op;
	const char *cp;
	int val;
	Linetype lt, rt;

	debug("eval%d", ops - eval_ops);
	cp = *cpp;
	lt = ops->inner(ops+1, valp, &cp);
	if (lt == LT_ERROR)
		return (LT_ERROR);
	for (;;) {
		cp = skipcomment(cp);
		for (op = ops->op; op->str != NULL; op++)
			if (strncmp(cp, op->str, strlen(op->str)) == 0)
				break;
		if (op->str == NULL)
			break;
		cp += strlen(op->str);
		debug("eval%d %s", ops - eval_ops, op->str);
		rt = ops->inner(ops+1, &val, &cp);
		if (rt == LT_ERROR)
			return (LT_ERROR);
		lt = op->fn(valp, lt, *valp, rt, val);
	}

	*cpp = cp;
	debug("eval%d = %d", ops - eval_ops, *valp);
	debug("eval%d lt = %s", ops - eval_ops, linetype_name[lt]);
	return (lt);
}

/*
 * Evaluate the expression on a #if or #elif line. If we can work out
 * the result we return LT_TRUE or LT_FALSE accordingly, otherwise we
 * return just a generic LT_IF.
 */
static Linetype
ifeval(const char **cpp)
{
	int ret;
	int val = 0;

	debug("eval %s", *cpp);
	/* killconsts mode treats purely-constant expressions as unknown. */
	constexpr = killconsts ? false : true;
	ret = eval_table(eval_ops, &val, cpp);
	debug("eval = %d", val);
	return (constexpr ? LT_IF : ret == LT_ERROR ? LT_IF : ret);
}

/*
 * Skip over comments, strings, and character literals and stop at the
 * next character position that is not whitespace.
Between calls we keep
 * the comment state in the global variable incomment, and we also adjust
 * the global variable linestate when we see a newline.
 * XXX: doesn't cope with the buffer splitting inside a state transition.
 */
static const char *
skipcomment(const char *cp)
{
	/* In text mode or inside an ignored section only track newlines. */
	if (text || ignoring[depth]) {
		for (; isspace((unsigned char)*cp); cp++)
			if (*cp == '\n')
				linestate = LS_START;
		return (cp);
	}
	while (*cp != '\0')
		/* don't reset to LS_START after a line continuation */
		if (strncmp(cp, "\\\r\n", 3) == 0)
			cp += 3;
		else if (strncmp(cp, "\\\n", 2) == 0)
			cp += 2;
		else switch (incomment) {
		case NO_COMMENT:
			/* A backslash-newline may split a comment opener. */
			if (strncmp(cp, "/\\\r\n", 4) == 0) {
				incomment = STARTING_COMMENT;
				cp += 4;
			} else if (strncmp(cp, "/\\\n", 3) == 0) {
				incomment = STARTING_COMMENT;
				cp += 3;
			} else if (strncmp(cp, "/*", 2) == 0) {
				incomment = C_COMMENT;
				cp += 2;
			} else if (strncmp(cp, "//", 2) == 0) {
				incomment = CXX_COMMENT;
				cp += 2;
			} else if (strncmp(cp, "\'", 1) == 0) {
				incomment = CHAR_LITERAL;
				linestate = LS_DIRTY;
				cp += 1;
			} else if (strncmp(cp, "\"", 1) == 0) {
				incomment = STRING_LITERAL;
				linestate = LS_DIRTY;
				cp += 1;
			} else if (strncmp(cp, "\n", 1) == 0) {
				linestate = LS_START;
				cp += 1;
			} else if (strchr(" \r\t", *cp) != NULL) {
				cp += 1;
			} else
				/* First non-whitespace, non-comment char. */
				return (cp);
			continue;
		case CXX_COMMENT:
			/* A // comment runs to the end of the line. */
			if (strncmp(cp, "\n", 1) == 0) {
				incomment = NO_COMMENT;
				linestate = LS_START;
			}
			cp += 1;
			continue;
		case CHAR_LITERAL:
		case STRING_LITERAL:
			if ((incomment == CHAR_LITERAL && cp[0] == '\'') ||
			    (incomment == STRING_LITERAL && cp[0] == '\"')) {
				incomment = NO_COMMENT;
				cp += 1;
			} else if (cp[0] == '\\') {
				/* Skip the escaped character, if present. */
				if (cp[1] == '\0')
					cp += 1;
				else
					cp += 2;
			} else if (strncmp(cp, "\n", 1) == 0) {
				if (incomment == CHAR_LITERAL)
					error("unterminated char literal");
				else
					error("unterminated string literal");
			} else
				cp += 1;
			continue;
		case C_COMMENT:
			/* A backslash-newline may also split the closer. */
			if (strncmp(cp, "*\\\r\n", 4) == 0) {
				incomment = FINISHING_COMMENT;
				cp += 4;
			} else if (strncmp(cp, "*\\\n", 3) == 0) {
				incomment = FINISHING_COMMENT;
				cp += 3;
			} else if (strncmp(cp, "*/", 2) == 0) {
				incomment = NO_COMMENT;
				cp += 2;
			} else
				cp += 1;
			continue;
		case STARTING_COMMENT:
			/* We saw "/" + line continuation; resolve it now. */
			if (*cp == '*') {
				incomment = C_COMMENT;
				cp += 1;
			} else if (*cp == '/') {
				incomment = CXX_COMMENT;
				cp += 1;
			} else {
				incomment = NO_COMMENT;
				linestate = LS_DIRTY;
			}
			continue;
		case FINISHING_COMMENT:
			/* We saw "*" + line continuation inside a comment. */
			if (*cp == '/') {
				incomment = NO_COMMENT;
				cp += 1;
			} else
				incomment = C_COMMENT;
			continue;
		default:
			abort(); /* bug */
		}
	return (cp);
}

/*
 * Skip macro arguments: a balanced parenthesized group following a
 * macro-like symbol. On unbalanced parens, rewind so the caller's
 * normal error path reports the problem.
 */
static const char *
skipargs(const char *cp)
{
	const char *ocp = cp;
	int level = 0;
	cp = skipcomment(cp);
	if (*cp != '(')
		return (cp);
	do {
		if (*cp == '(')
			level++;
		if (*cp == ')')
			level--;
		cp = skipcomment(cp+1);
	} while (level != 0 && *cp != '\0');
	if (level == 0)
		return (cp);
	else
		/* Rewind and re-detect the syntax error later. */
		return (ocp);
}

/*
 * Skip over an identifier.
 */
static const char *
skipsym(const char *cp)
{
	while (!endsym(*cp))
		++cp;
	return (cp);
}

/*
 * Look for the symbol in the symbol table. If it is found, we return
 * the symbol table index, else we return -1.
 * In symlist mode we only print the symbol and return a dummy index.
 */
static int
findsym(const char *str)
{
	const char *cp;
	int symind;

	cp = skipsym(str);
	if (cp == str)
		return (-1);
	if (symlist) {
		if (symdepth && firstsym)
			printf("%s%3d", zerosyms ? "" : "\n", depth);
		firstsym = zerosyms = false;
		printf("%s%.*s%s",
		    symdepth ? " " : "",
		    (int)(cp-str), str,
		    symdepth ? "" : "\n");
		/* we don't care about the value of the symbol */
		return (0);
	}
	for (symind = 0; symind < nsyms; ++symind) {
		if (strlcmp(symname[symind], str, cp-str) == 0) {
			debug("findsym %s %s", symname[symind],
			    value[symind] ? value[symind] : "");
			return (symind);
		}
	}
	return (-1);
}

/*
 * Add a symbol to the symbol table, from a "SYM", "SYM=VAL" (define)
 * or "SYM" (undefine) command-line argument. The '=' in the argument
 * string is overwritten with NUL so sym and its value become separate
 * strings. Re-adding an existing symbol overwrites its entry.
 */
static void
addsym(bool ignorethis, bool definethis, char *sym)
{
	int symind;
	char *val;

	symind = findsym(sym);
	if (symind < 0) {
		if (nsyms >= MAXSYMS)
			errx(2, "too many symbols");
		symind = nsyms++;
	}
	symname[symind] = sym;
	ignore[symind] = ignorethis;
	val = sym + (skipsym(sym) - sym);
	if (definethis) {
		if (*val == '=') {
			value[symind] = val+1;
			*val = '\0';
		} else if (*val == '\0')
			value[symind] = "1";
		else
			usage();
	} else {
		if (*val != '\0')
			usage();
		value[symind] = NULL;
	}
	debug("addsym %s=%s", symname[symind],
	    value[symind] ? value[symind] : "undef");
}

/*
 * Compare s with n characters of t.
 * The same as strncmp() except that it checks that s[n] == '\0'.
 */
static int
strlcmp(const char *s, const char *t, size_t n)
{
	while (n-- && *t != '\0')
		if (*s != *t)
			return ((unsigned char)*s - (unsigned char)*t);
		else
			++s, ++t;
	return ((unsigned char)*s);
}

/*
 * Diagnostics.
 */
static void
debug(const char *msg, ...)
{
	va_list ap;

	if (debugging) {
		va_start(ap, msg);
		vwarnx(msg, ap);
		va_end(ap);
	}
}

/* Report a parse error with file/line context, then exit(2). */
static void
error(const char *msg)
{
	if (depth == 0)
		warnx("%s: %d: %s", filename, linenum, msg);
	else
		warnx("%s: %d: %s (#if line %d depth %d)",
		    filename, linenum, msg, stifline[depth], depth);
	closeout();
	errx(2, "output may be truncated");
}
gpl-2.0
AICP/android_kernel_asus_tf201
fs/nls/nls_cp874.c
12564
10995
/*
 * linux/fs/nls/nls_cp874.c
 *
 * Charset cp874 translation tables.
 * Generated automatically from the Unicode and charset
 * tables from the Unicode Organization (www.unicode.org).
 * The Unicode to charset table has only exact mappings.
 *
 * NOTE: the tables below are machine-generated; do not hand-edit.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>

/* cp874 byte -> Unicode code point; 0x0000 marks an unmapped byte. */
static const wchar_t charset2uni[256] = {
	/* 0x00*/
	0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
	0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
	/* 0x10*/
	0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
	0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f,
	/* 0x20*/
	0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
	0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
	/* 0x30*/
	0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
	0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
	/* 0x40*/
	0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
	0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
	/* 0x50*/
	0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
	0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
	/* 0x60*/
	0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
	0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
	/* 0x70*/
	0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
	0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f,
	/* 0x80*/
	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2026, 0x0000, 0x0000,
	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
	/* 0x90*/
	0x0000, 0x2018, 0x2019, 0x201c, 0x201d, 0x2022, 0x2013, 0x2014,
	0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
	/* 0xa0*/
	0x00a0, 0x0e01, 0x0e02, 0x0e03, 0x0e04, 0x0e05, 0x0e06, 0x0e07,
	0x0e08, 0x0e09, 0x0e0a, 0x0e0b, 0x0e0c, 0x0e0d, 0x0e0e, 0x0e0f,
	/* 0xb0*/
	0x0e10, 0x0e11, 0x0e12, 0x0e13, 0x0e14, 0x0e15, 0x0e16, 0x0e17,
	0x0e18, 0x0e19, 0x0e1a, 0x0e1b, 0x0e1c, 0x0e1d, 0x0e1e, 0x0e1f,
	/* 0xc0*/
	0x0e20, 0x0e21, 0x0e22, 0x0e23, 0x0e24, 0x0e25, 0x0e26, 0x0e27,
	0x0e28, 0x0e29, 0x0e2a, 0x0e2b, 0x0e2c, 0x0e2d, 0x0e2e, 0x0e2f,
	/* 0xd0*/
	0x0e30, 0x0e31, 0x0e32, 0x0e33, 0x0e34, 0x0e35, 0x0e36, 0x0e37,
	0x0e38, 0x0e39, 0x0e3a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0e3f,
	/* 0xe0*/
	0x0e40, 0x0e41, 0x0e42, 0x0e43, 0x0e44, 0x0e45, 0x0e46, 0x0e47,
	0x0e48, 0x0e49, 0x0e4a, 0x0e4b, 0x0e4c, 0x0e4d, 0x0e4e, 0x0e4f,
	/* 0xf0*/
	0x0e50, 0x0e51, 0x0e52, 0x0e53, 0x0e54, 0x0e55, 0x0e56, 0x0e57,
	0x0e58, 0x0e59, 0x0e5a, 0x0e5b, 0x0000, 0x0000, 0x0000, 0x0000,
};

/* Unicode U+00xx -> cp874 byte (trailing entries default to 0). */
static const unsigned char page00[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
};

/* Unicode U+0Exx (Thai block) -> cp874 byte. */
static const unsigned char page0e[256] = {
	0x00, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x00-0x07 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x08-0x0f */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0x10-0x17 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0x18-0x1f */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */
	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0x38-0x3f */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x50-0x57 */
	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
};

/* Unicode U+20xx (general punctuation) -> cp874 byte. */
static const unsigned char page20[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x96, 0x97, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x91, 0x92, 0x00, 0x00, 0x93, 0x94, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x95, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0x20-0x27 */
};

/* Per-high-byte dispatch for uni2char(); NULL means the whole page is
 * unmapped. Indexed by (unicode >> 8); entries past 0x27 default NULL. */
static const unsigned char *const page_uni2charset[256] = {
	page00, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   page0e, NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
	page20, NULL,   NULL,   NULL,   NULL,   NULL,   NULL,   NULL,
};

/* Case-folding tables: only the ASCII range actually folds; 0x00 marks
 * bytes with no mapping (matching charset2uni's unmapped slots). */
static const unsigned char charset2lower[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
	0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};

static const unsigned char charset2upper[256] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
	0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
	0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
	0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
	0xf8, 0xf9, 0xfa, 0xfb, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};

/*
 * Convert one Unicode character to its cp874 byte.
 * Returns 1 (bytes written), -ENAMETOOLONG if the output buffer is
 * empty, or -EINVAL if the code point has no cp874 mapping.
 */
static int uni2char(wchar_t uni,
		    unsigned char *out, int boundlen)
{
	const unsigned char *uni2charset;
	unsigned char cl = uni & 0x00ff;
	unsigned char ch = (uni & 0xff00) >> 8;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	uni2charset = page_uni2charset[ch];
	if (uni2charset && uni2charset[cl])
		out[0] = uni2charset[cl];
	else
		return -EINVAL;
	return 1;
}

/*
 * Convert one cp874 byte to Unicode. Returns 1 (bytes consumed) or
 * -EINVAL for an unmapped byte. Note: boundlen is not checked here —
 * presumably callers guarantee at least one input byte; TODO confirm
 * against the NLS callers.
 */
static int char2uni(const unsigned char *rawstring, int boundlen,
		    wchar_t *uni)
{
	*uni = charset2uni[*rawstring];
	if (*uni == 0x0000)
		return -EINVAL;
	return 1;
}

static struct nls_table table = {
	.charset = "cp874",
	.alias = "tis-620",
	.uni2char = uni2char,
	.char2uni = char2uni,
	.charset2lower = charset2lower,
	.charset2upper = charset2upper,
	.owner = THIS_MODULE,
};

static int __init init_nls_cp874(void)
{
	return register_nls(&table);
}

static void __exit exit_nls_cp874(void)
{
	unregister_nls(&table);
}

module_init(init_nls_cp874)
module_exit(exit_nls_cp874)

MODULE_LICENSE("Dual BSD/GPL");
/* cp874 is also reachable under its TIS-620 alias. */
MODULE_ALIAS_NLS(tis-620);
gpl-2.0
sharvanath/ModNet_Linux
arch/mips/pci/pci-emma2rh.c
13844
2700
/*
 * Copyright (C) NEC Electronics Corporation 2004-2006
 *
 * This file is based on the arch/mips/ddb5xxx/ddb5477/pci.c
 *
 * Copyright 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>

#include <asm/bootinfo.h>

#include <asm/emma/emma2rh.h>

/* PCI I/O window exposed by the EMMA2RH host bridge. */
static struct resource pci_io_resource = {
	.name = "pci IO space",
	.start = EMMA2RH_PCI_IO_BASE,
	.end = EMMA2RH_PCI_IO_BASE + EMMA2RH_PCI_IO_SIZE - 1,
	.flags = IORESOURCE_IO,
};

/* PCI memory window exposed by the EMMA2RH host bridge. */
static struct resource pci_mem_resource = {
	.name = "pci memory space",
	.start = EMMA2RH_PCI_MEM_BASE,
	.end = EMMA2RH_PCI_MEM_BASE + EMMA2RH_PCI_MEM_SIZE - 1,
	.flags = IORESOURCE_MEM,
};

extern struct pci_ops emma2rh_pci_ops;

static struct pci_controller emma2rh_pci_controller = {
	.pci_ops = &emma2rh_pci_ops,
	.mem_resource = &pci_mem_resource,
	.io_resource = &pci_io_resource,
	/* CPU-to-bus address translation for the memory window. */
	.mem_offset = -0x04000000,
	.io_offset = 0,
};

/*
 * Program the EMMA2RH PCI host-bridge registers: arbiter, the
 * bridge's own config-space command/BARs, inbound windows (IWIN),
 * endian swap, and target windows (TWIN).
 *
 * NOTE(review): the magic register values come from the original NEC
 * board support; their bit meanings are not documented here. The write
 * order is assumed significant — do not reorder.
 */
static void __init emma2rh_pci_init(void)
{
	/* setup PCI interface */
	emma2rh_out32(EMMA2RH_PCI_ARBIT_CTR, 0x70f);

	emma2rh_out32(EMMA2RH_PCI_IWIN0_CTR, 0x80000a18);
	/* One 32-bit write covers both the command and status registers. */
	emma2rh_out32(EMMA2RH_PCI_CONFIG_BASE + PCI_COMMAND,
		      PCI_STATUS_DEVSEL_MEDIUM | PCI_STATUS_CAP_LIST |
		      PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
	emma2rh_out32(EMMA2RH_PCI_CONFIG_BASE + PCI_BASE_ADDRESS_0, 0x10000000);
	emma2rh_out32(EMMA2RH_PCI_CONFIG_BASE + PCI_BASE_ADDRESS_1, 0x00000000);

	emma2rh_out32(EMMA2RH_PCI_IWIN0_CTR, 0x12000000 | 0x218);
	emma2rh_out32(EMMA2RH_PCI_IWIN1_CTR, 0x18000000 | 0x600);
	emma2rh_out32(EMMA2RH_PCI_INIT_ESWP, 0x00000200);

	emma2rh_out32(EMMA2RH_PCI_TWIN_CTR, 0x00009200);

	emma2rh_out32(EMMA2RH_PCI_TWIN_BADR, 0x00000000);

	emma2rh_out32(EMMA2RH_PCI_TWIN0_DADR, 0x00000000);
	emma2rh_out32(EMMA2RH_PCI_TWIN1_DADR, 0x00000000);
}

/* Bring up the bridge hardware, then hand the bus to the PCI core. */
static int __init emma2rh_pci_setup(void)
{
	emma2rh_pci_init();
	register_pci_controller(&emma2rh_pci_controller);
	return 0;
}

arch_initcall(emma2rh_pci_setup);
gpl-2.0
pkirchhofer/android-kernel
arch/powerpc/boot/treeboot-walnut.c
14100
2215
/*
 * Old U-boot compatibility for Walnut
 *
 * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
 *
 * Copyright 2007 IBM Corporation
 *   Based on cuboot-83xx.c, which is:
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "ops.h"
#include "stdio.h"
#include "dcr.h"
#include "4xx.h"
#include "io.h"

BSS_STACK(4096);

/*
 * Read the FPGA board-settings register and, when its low bit is set,
 * toggle bit 0x80000 of the flash and SRAM "reg" base addresses in the
 * device tree — presumably the strap exchanges the two 512KiB EBC
 * windows between the devices (TODO confirm against board docs).
 */
static void walnut_flashsel_fixup(void)
{
	void *node, *sram_node;
	u32 flash_reg[3] = {0x0, 0x0, 0x80000};
	u32 sram_reg[3] = {0x0, 0x0, 0x80000};
	u8 *fpga_regs;
	u8 brds1 = 0x0;

	node = finddevice("/plb/ebc/fpga");
	if (!node)
		fatal("Couldn't locate FPGA node\n\r");

	/* "virtual-reg" holds the CPU-visible address of the FPGA regs. */
	if (getprop(node, "virtual-reg", &fpga_regs, sizeof(fpga_regs))
	    != sizeof(fpga_regs))
		fatal("no virtual-reg property\n\r");

	brds1 = in_8(fpga_regs);

	node = finddevice("/plb/ebc/flash");
	if (!node)
		fatal("Couldn't locate flash node\n\r");

	if (getprop(node, "reg", flash_reg, sizeof(flash_reg))
	    != sizeof(flash_reg))
		fatal("flash reg property has unexpected size\n\r");

	sram_node = finddevice("/plb/ebc/sram");
	if (!sram_node)
		fatal("Couldn't locate sram node\n\r");

	if (getprop(sram_node, "reg", sram_reg, sizeof(sram_reg))
	    != sizeof(sram_reg))
		fatal("sram reg property has unexpected size\n\r");

	if (brds1 & 0x1) {
		flash_reg[1] ^= 0x80000;
		sram_reg[1] ^= 0x80000;
	}

	setprop(node, "reg", flash_reg, sizeof(flash_reg));
	setprop(sram_node, "reg", sram_reg, sizeof(sram_reg));
}

#define WALNUT_OPENBIOS_MAC_OFF 0xfffffe0b

/* Device-tree fixups run before handing control to the kernel. */
static void walnut_fixups(void)
{
	ibm4xx_sdram_fixup_memsize();
	ibm405gp_fixup_clocks(33330000, 0xa8c000);
	ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
	ibm4xx_fixup_ebc_ranges("/plb/ebc");
	walnut_flashsel_fixup();
	/* MAC address lives at a fixed offset in the OpenBIOS ROM. */
	dt_fixup_mac_address_by_alias("ethernet0",
				      (u8 *) WALNUT_OPENBIOS_MAC_OFF);
}

void platform_init(void)
{
	unsigned long ram_top = 0x2000000;
	unsigned long heap_size = ram_top - (unsigned long)_end;

	/* Heap spans from the end of the wrapper image to 32MB. */
	simple_alloc_init(_end, heap_size, 32, 32);
	platform_ops.fixups = walnut_fixups;
	platform_ops.exit = ibm40x_dbcr_reset;
	fdt_init(_dtb_start);
	serial_console_init();
}
gpl-2.0
teemodk/android_kernel_htc_endeavoru
drivers/video/tegra/fb.c
21
20121
/* * drivers/video/tegra/fb.c * * Copyright (C) 2010 Google, Inc. * Author: Erik Gilling <konkers@android.com> * Colin Cross <ccross@android.com> * Travis Geiselbrecht <travis@palm.com> * * Copyright (c) 2010-2012, NVIDIA CORPORATION, All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/fb.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/workqueue.h> #include <asm/atomic.h> #include <video/tegrafb.h> #include <mach/dc.h> #include <mach/fb.h> #include <mach/board_htc.h> #include <linux/nvhost.h> #include <linux/nvmap.h> #include "host/dev.h" #include "nvmap/nvmap.h" #include "dc/dc_priv.h" #define ONMODE_CHARGE() ((board_mfg_mode() == BOARD_MFG_MODE_NORMAL) && \ (board_zchg_mode() & 0x2) && \ (dc == tegra_fb->win->dc)) /* Pad pitch to 16-byte boundary. 
*/ #define TEGRA_LINEAR_PITCH_ALIGNMENT 32 struct tegra_usb_projector_info usb_pjt_info; struct tegra_fb_info { struct tegra_dc_win *win; struct nvhost_device *ndev; struct fb_info *info; bool valid; struct resource *fb_mem; int xres; int yres; }; /* palette array used by the fbcon */ static u32 pseudo_palette[16]; static int tegra_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct tegra_fb_info *tegra_fb = info->par; struct tegra_dc *dc = tegra_fb->win->dc; struct tegra_dc_out_ops *ops = dc->out_ops; struct fb_videomode mode; if ((var->yres * var->xres * var->bits_per_pixel / 8 * 2) > info->screen_size) return -EINVAL; /* Apply mode filter for HDMI only -LVDS supports only fix mode */ if (ops && ops->mode_filter) { fb_var_to_videomode(&mode, var); if (!ops->mode_filter(dc, &mode)) return -EINVAL; /* Mode filter may have modified the mode */ fb_videomode_to_var(var, &mode); } /* Double yres_virtual to allow double buffering through pan_display */ var->yres_virtual = var->yres * 2; return 0; } static int tegra_fb_set_par(struct fb_info *info) { struct tegra_fb_info *tegra_fb = info->par; struct fb_var_screeninfo *var = &info->var; if (var->bits_per_pixel) { /* we only support RGB ordering for now */ switch (var->bits_per_pixel) { case 32: var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; tegra_fb->win->fmt = TEGRA_WIN_FMT_R8G8B8A8; break; case 16: var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; tegra_fb->win->fmt = TEGRA_WIN_FMT_B5G6R5; break; default: return -EINVAL; } info->fix.line_length = var->xres * var->bits_per_pixel / 8; /* Pad the stride to 16-byte boundary. 
*/ info->fix.line_length = round_up(info->fix.line_length, TEGRA_LINEAR_PITCH_ALIGNMENT); tegra_fb->win->stride = info->fix.line_length; tegra_fb->win->stride_uv = 0; tegra_fb->win->phys_addr_u = 0; tegra_fb->win->phys_addr_v = 0; } if (var->pixclock) { bool stereo; struct fb_videomode m; fb_var_to_videomode(&m, var); info->mode = (struct fb_videomode *) fb_find_nearest_mode(&m, &info->modelist); if (!info->mode) { dev_warn(&tegra_fb->ndev->dev, "can't match video mode\n"); return -EINVAL; } /* * only enable stereo if the mode supports it and * client requests it */ stereo = !!(var->vmode & info->mode->vmode & #ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT FB_VMODE_STEREO_FRAME_PACK); #else FB_VMODE_STEREO_LEFT_RIGHT); #endif tegra_dc_set_fb_mode(tegra_fb->win->dc, info->mode, stereo); tegra_fb->win->w.full = dfixed_const(info->mode->xres); tegra_fb->win->h.full = dfixed_const(info->mode->yres); tegra_fb->win->out_w = info->mode->xres; tegra_fb->win->out_h = info->mode->yres; } return 0; } static int tegra_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct fb_var_screeninfo *var = &info->var; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { u32 v; if (regno >= 16) return -EINVAL; red = (red >> (16 - info->var.red.length)); green = (green >> (16 - info->var.green.length)); blue = (blue >> (16 - info->var.blue.length)); v = (red << var->red.offset) | (green << var->green.offset) | (blue << var->blue.offset); ((u32 *)info->pseudo_palette)[regno] = v; } return 0; } static int tegra_fb_setcmap(struct fb_cmap *cmap, struct fb_info *info) { struct tegra_fb_info *tegra_fb = info->par; struct tegra_dc *dc = tegra_fb->win->dc; int i; u16 *red = cmap->red; u16 *green = cmap->green; u16 *blue = cmap->blue; int start = cmap->start; if (((unsigned)start > 255) || ((start + cmap->len) > 256)) return -EINVAL; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == 
FB_VISUAL_DIRECTCOLOR) { /* * For now we are considering color schemes with * cmap->len <=16 as special case of basic color * scheme to support fbconsole.But for DirectColor * visuals(like the one we actually have, that include * a HW LUT),the way it's intended to work is that the * actual LUT HW is programmed to the intended values, * even for small color maps like those with 16 or fewer * entries. The pseudo_palette is then programmed to the * identity transform. */ if (cmap->len <= 16) { /* Low-color schemes like fbconsole*/ u16 *transp = cmap->transp; u_int vtransp = 0xffff; for (i = 0; i < cmap->len; i++) { if (transp) vtransp = *transp++; if (tegra_fb_setcolreg(start++, *red++, *green++, *blue++, vtransp, info)) return -EINVAL; } } else { /* High-color schemes*/ for (i = 0; i < cmap->len; i++) { dc->fb_lut.r[start+i] = *red++ >> 8; dc->fb_lut.g[start+i] = *green++ >> 8; dc->fb_lut.b[start+i] = *blue++ >> 8; } tegra_dc_update_lut(dc, -1, -1); } } return 0; } #if defined(CONFIG_FRAMEBUFFER_CONSOLE) static void tegra_fb_flip_win(struct tegra_fb_info *tegra_fb) { struct tegra_dc_win *win = tegra_fb->win; struct fb_info *info = tegra_fb->info; win->x.full = dfixed_const(0); win->y.full = dfixed_const(0); win->w.full = dfixed_const(tegra_fb->xres); win->h.full = dfixed_const(tegra_fb->yres); /* TODO: set to output res dc */ win->out_x = 0; win->out_y = 0; win->out_w = tegra_fb->xres; win->out_h = tegra_fb->yres; win->z = 0; win->phys_addr = info->fix.smem_start + (info->var.yoffset * info->fix.line_length) + (info->var.xoffset * (info->var.bits_per_pixel / 8)); win->virt_addr = info->screen_base; win->phys_addr_u = 0; win->phys_addr_v = 0; win->stride = info->fix.line_length; win->stride_uv = 0; switch (info->var.bits_per_pixel) { default: WARN_ON(1); /* fall through */ case 32: tegra_fb->win->fmt = TEGRA_WIN_FMT_R8G8B8A8; break; case 16: tegra_fb->win->fmt = TEGRA_WIN_FMT_B5G6R5; break; } win->flags = TEGRA_WIN_FLAG_ENABLED; tegra_dc_update_windows(&tegra_fb->win, 
1); tegra_dc_sync_windows(&tegra_fb->win, 1); } #endif static int tegra_fb_blank(int blank, struct fb_info *info) { struct tegra_fb_info *tegra_fb = info->par; struct tegra_dc *dc = tegra_fb->win->dc; struct tegra_dc_out *out = dc->out; struct tegra_dsi_out *dsi = out->dsi; switch (blank) { case FB_BLANK_UNBLANK: dev_dbg(&tegra_fb->ndev->dev, "unblank\n"); if ((dsi) && (dsi->dsi_cabc_dimming_on_cmd)) dc->request_dimming_on = true; tegra_fb->win->flags = TEGRA_WIN_FLAG_ENABLED; tegra_dc_enable(tegra_fb->win->dc); return 0; case FB_BLANK_NORMAL: dev_dbg(&tegra_fb->ndev->dev, "blank - normal\n"); tegra_dc_blank(tegra_fb->win->dc); return 0; case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_POWERDOWN: dev_dbg(&tegra_fb->ndev->dev, "blank - powerdown\n"); if ((dsi) && (dsi->dsi_cabc_dimming_on_cmd)) { del_timer_sync(&dc->dimming_update_timer); flush_workqueue(dc->dimming_wq); } tegra_dc_disable(tegra_fb->win->dc); return 0; default: return -ENOTTY; } } static int tegra_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct tegra_fb_info *tegra_fb = info->par; char __iomem *flush_start; char __iomem *flush_end; u32 addr; int i; struct tegra_dc *dc = tegra_dc_get_dc(0); /* This is only for china sku suspend/resume battery update and only for DC0 Initialize window. It wouldn't support yuv in framebuffer. Therefore, we set RGBX as default and disable the other windows. 
*/ struct tegra_dc_win *dcwins[DC_N_WINDOWS]; if (ONMODE_CHARGE()) { for (i = 0; i < DC_N_WINDOWS; i++) { dcwins[i] = tegra_dc_get_window(tegra_fb->win->dc, i); dcwins[i]->fmt = TEGRA_WIN_FMT_R8G8B8A8; if (tegra_fb->win != dcwins[i]) dcwins[i]->flags &= ~TEGRA_WIN_FLAG_ENABLED; else dcwins[i]->flags |= TEGRA_WIN_FLAG_ENABLED; } } if (!tegra_fb->win->cur_handle) { flush_start = info->screen_base + (var->yoffset * info->fix.line_length); flush_end = flush_start + (var->yres * info->fix.line_length); info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; addr = info->fix.smem_start + (var->yoffset * info->fix.line_length) + (var->xoffset * (var->bits_per_pixel/8)); tegra_fb->win->phys_addr = addr; tegra_fb->win->flags = TEGRA_WIN_FLAG_ENABLED; tegra_fb->win->virt_addr = info->screen_base; tegra_fb->win->x.full = dfixed_const(0); tegra_fb->win->y.full = dfixed_const(0); tegra_fb->win->w.full = dfixed_const(var->xres); tegra_fb->win->h.full = dfixed_const(var->yres); tegra_fb->win->out_x = 0; tegra_fb->win->out_y = 0; tegra_fb->win->out_w = var->xres; tegra_fb->win->out_h = var->yres; tegra_fb->win->z = 0; tegra_fb->win->stride = info->fix.line_length; if (ONMODE_CHARGE()) { /*Update all of windows, not only window a, b or c*/ tegra_dc_update_windows(dcwins, DC_N_WINDOWS); tegra_dc_sync_windows(dcwins, DC_N_WINDOWS); } else { tegra_dc_update_windows(&tegra_fb->win, 1); tegra_dc_sync_windows(&tegra_fb->win, 1); } } return 0; } static void tegra_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { cfb_fillrect(info, rect); } static void tegra_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) { cfb_copyarea(info, region); } static void tegra_fb_imageblit(struct fb_info *info, const struct fb_image *image) { cfb_imageblit(info, image); } static int tegra_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct tegra_fb_info *tegra_fb = (struct tegra_fb_info *)info->par; struct tegra_fb_modedb modedb; void 
__user *argp = (void __user *)arg; struct fb_modelist *modelist; struct fb_vblank vblank = {}; int i; int ret = 0; struct tegra_usb_projector_info tmp_info; switch (cmd) { case FBIO_TEGRA_GET_MODEDB: if (copy_from_user(&modedb, (void __user *)arg, sizeof(modedb))) return -EFAULT; i = 0; list_for_each_entry(modelist, &info->modelist, list) { struct fb_var_screeninfo var; if (i >= modedb.modedb_len) break; /* fb_videomode_to_var doesn't fill out all the members of fb_var_screeninfo */ memset(&var, 0x0, sizeof(var)); fb_videomode_to_var(&var, &modelist->mode); if (copy_to_user((void __user *)&modedb.modedb[i], &var, sizeof(var))) return -EFAULT; i++; if (var.vmode & FB_VMODE_STEREO_MASK) { if (i >= modedb.modedb_len) break; var.vmode &= ~FB_VMODE_STEREO_MASK; if (copy_to_user( (void __user *)&modedb.modedb[i], &var, sizeof(var))) return -EFAULT; i++; } } modedb.modedb_len = i; if (copy_to_user((void __user *)arg, &modedb, sizeof(modedb))) return -EFAULT; break; case FBIO_TEGRA_GET_USB_PROJECTOR_INFO: ret = copy_to_user(argp, &usb_pjt_info, sizeof(usb_pjt_info)); if (ret) return ret; break; case FBIO_TEGRA_SET_USB_PROJECTOR_INFO: ret = copy_from_user(&tmp_info, argp, sizeof(tmp_info)); usb_pjt_info.latest_offset = tmp_info.latest_offset; if (ret) return ret; break; case FBIOGET_VBLANK: tegra_dc_get_fbvblank(tegra_fb->win->dc, &vblank); if (copy_to_user( (void __user *)arg, &vblank, sizeof(vblank))) return -EFAULT; break; case FBIO_WAITFORVSYNC: return tegra_dc_wait_for_vsync(tegra_fb->win->dc); default: return -ENOTTY; } return 0; } int tegra_fb_get_mode(struct tegra_dc *dc) { if (!dc->fb->info->mode) return -1; return dc->fb->info->mode->refresh; } int tegra_fb_set_mode(struct tegra_dc *dc, int fps) { size_t stereo; struct list_head *pos; struct fb_videomode *best_mode = NULL; int curr_diff = INT_MAX; /* difference of best_mode refresh rate */ struct fb_modelist *modelist; struct fb_info *info = dc->fb->info; list_for_each(pos, &info->modelist) { struct fb_videomode 
*mode; modelist = list_entry(pos, struct fb_modelist, list); mode = &modelist->mode; if (fps <= mode->refresh && curr_diff > (mode->refresh - fps)) { curr_diff = mode->refresh - fps; best_mode = mode; } } if (best_mode) { info->mode = best_mode; stereo = !!(info->var.vmode & info->mode->vmode & #ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT FB_VMODE_STEREO_FRAME_PACK); #else FB_VMODE_STEREO_LEFT_RIGHT); #endif return tegra_dc_set_fb_mode(dc, best_mode, stereo); } return -EIO; } static struct fb_ops tegra_fb_ops = { .owner = THIS_MODULE, .fb_check_var = tegra_fb_check_var, .fb_set_par = tegra_fb_set_par, .fb_setcmap = tegra_fb_setcmap, .fb_blank = tegra_fb_blank, .fb_pan_display = tegra_fb_pan_display, .fb_fillrect = tegra_fb_fillrect, .fb_copyarea = tegra_fb_copyarea, .fb_imageblit = tegra_fb_imageblit, .fb_ioctl = tegra_fb_ioctl, }; void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info, struct fb_monspecs *specs, bool (*mode_filter)(const struct tegra_dc *dc, struct fb_videomode *mode)) { struct fb_event event; int i; mutex_lock(&fb_info->info->lock); fb_destroy_modedb(fb_info->info->monspecs.modedb); fb_destroy_modelist(&fb_info->info->modelist); if (specs == NULL) { struct tegra_dc_mode mode; memset(&fb_info->info->monspecs, 0x0, sizeof(fb_info->info->monspecs)); memset(&mode, 0x0, sizeof(mode)); /* * reset video mode properties to prevent garbage being displayed on 'mode' device. 
*/ fb_info->info->mode = (struct fb_videomode*) NULL; tegra_dc_set_mode(fb_info->win->dc, &mode); mutex_unlock(&fb_info->info->lock); return; } memcpy(&fb_info->info->monspecs, specs, sizeof(fb_info->info->monspecs)); fb_info->info->mode = specs->modedb; for (i = 0; i < specs->modedb_len; i++) { if (mode_filter) { if (mode_filter(fb_info->win->dc, &specs->modedb[i])) fb_add_videomode(&specs->modedb[i], &fb_info->info->modelist); } else { fb_add_videomode(&specs->modedb[i], &fb_info->info->modelist); } } event.info = fb_info->info; fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); mutex_unlock(&fb_info->info->lock); } struct tegra_fb_info *tegra_fb_register(struct nvhost_device *ndev, struct tegra_dc *dc, struct tegra_fb_data *fb_data, struct resource *fb_mem) { struct tegra_dc_win *win; struct fb_info *info; struct tegra_fb_info *tegra_fb; void __iomem *fb_base = NULL; unsigned long fb_size = 0; unsigned long fb_phys = 0; int ret = 0; unsigned stride; win = tegra_dc_get_window(dc, fb_data->win); if (!win) { dev_err(&ndev->dev, "dc does not have a window at index %d\n", fb_data->win); return ERR_PTR(-ENOENT); } info = framebuffer_alloc(sizeof(struct tegra_fb_info), &ndev->dev); if (!info) { ret = -ENOMEM; goto err; } tegra_fb = info->par; tegra_fb->win = win; tegra_fb->ndev = ndev; tegra_fb->fb_mem = fb_mem; tegra_fb->xres = fb_data->xres; tegra_fb->yres = fb_data->yres; if (fb_mem) { fb_size = resource_size(fb_mem); fb_phys = fb_mem->start; fb_base = ioremap_nocache(fb_phys, fb_size); if (!fb_base) { dev_err(&ndev->dev, "fb can't be mapped\n"); ret = -EBUSY; goto err_free; } tegra_fb->valid = true; } stride = tegra_dc_get_stride(dc, 0); if (!stride) /* default to pad the stride to 16-byte boundary. 
*/ stride = round_up(info->fix.line_length, TEGRA_LINEAR_PITCH_ALIGNMENT); info->fbops = &tegra_fb_ops; info->pseudo_palette = pseudo_palette; info->screen_base = fb_base; info->screen_size = fb_size; strlcpy(info->fix.id, "tegra_fb", sizeof(info->fix.id)); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.xpanstep = 1; info->fix.ypanstep = 1; info->fix.accel = FB_ACCEL_NONE; info->fix.smem_start = fb_phys; info->fix.smem_len = fb_size; info->fix.line_length = fb_data->xres * fb_data->bits_per_pixel / 8; info->fix.line_length = stride; info->var.xres = fb_data->xres; info->var.yres = fb_data->yres; info->var.xres_virtual = fb_data->xres; info->var.yres_virtual = fb_data->yres * 2; info->var.bits_per_pixel = fb_data->bits_per_pixel; info->var.activate = FB_ACTIVATE_VBL; info->var.height = tegra_dc_get_out_height(dc); info->var.width = tegra_dc_get_out_width(dc); info->var.pixclock = 0; info->var.left_margin = 0; info->var.right_margin = 0; info->var.upper_margin = 0; info->var.lower_margin = 0; info->var.hsync_len = 0; info->var.vsync_len = 0; info->var.vmode = FB_VMODE_NONINTERLACED; win->x.full = dfixed_const(0); win->y.full = dfixed_const(0); win->w.full = dfixed_const(fb_data->xres); win->h.full = dfixed_const(fb_data->yres); /* TODO: set to output res dc */ win->out_x = 0; win->out_y = 0; win->out_w = fb_data->xres; win->out_h = fb_data->yres; win->z = 0; win->phys_addr = fb_phys; win->virt_addr = fb_base; win->phys_addr_u = 0; win->phys_addr_v = 0; win->stride = info->fix.line_length; win->stride_uv = 0; win->flags = TEGRA_WIN_FLAG_ENABLED; if (fb_mem) tegra_fb_set_par(info); if (register_framebuffer(info)) { dev_err(&ndev->dev, "failed to register framebuffer\n"); ret = -ENODEV; goto err_iounmap_fb; } tegra_fb->info = info; dev_info(&ndev->dev, "probed\n"); if (fb_data->flags & TEGRA_FB_FLIP_ON_PROBE) { tegra_dc_update_windows(&tegra_fb->win, 1); tegra_dc_sync_windows(&tegra_fb->win, 1); } if (dc->mode.pclk > 1000) { 
struct tegra_dc_mode *mode = &dc->mode; struct fb_videomode vmode; if (dc->out->flags & TEGRA_DC_OUT_ONE_SHOT_MODE) info->var.pixclock = KHZ2PICOS(mode->rated_pclk / 1000); else info->var.pixclock = KHZ2PICOS(mode->pclk / 1000); info->var.left_margin = mode->h_back_porch; info->var.right_margin = mode->h_front_porch; info->var.upper_margin = mode->v_back_porch; info->var.lower_margin = mode->v_front_porch; info->var.hsync_len = mode->h_sync_width; info->var.vsync_len = mode->v_sync_width; /* Keep info->var consistent with info->modelist. */ fb_var_to_videomode(&vmode, &info->var); fb_add_videomode(&vmode, &info->modelist); } return tegra_fb; err_iounmap_fb: if (fb_base) iounmap(fb_base); err_free: framebuffer_release(info); err: return ERR_PTR(ret); } void tegra_fb_unregister(struct tegra_fb_info *fb_info) { struct fb_info *info = fb_info->info; unregister_framebuffer(info); iounmap(info->screen_base); framebuffer_release(info); }
gpl-2.0
zhaojinxin409/shooter-player
src/filters/transform/mpadecfilter/libmad-0.15.0b/synth.c
21
25169
/* * libmad - MPEG audio decoder library * Copyright (C) 2000-2004 Underbit Technologies, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ # ifdef HAVE_CONFIG_H # include "config.h" # endif # include "global.h" # include "fixed.h" # include "frame.h" # include "synth.h" /* * NAME: synth->init() * DESCRIPTION: initialize synth struct */ void mad_synth_init(struct mad_synth *synth) { mad_synth_mute(synth); synth->phase = 0; synth->pcm.samplerate = 0; synth->pcm.channels = 0; synth->pcm.length = 0; } /* * NAME: synth->mute() * DESCRIPTION: zero all polyphase filterbank values, resetting synthesis */ void mad_synth_mute(struct mad_synth *synth) { unsigned int ch, s, v; for (ch = 0; ch < 2; ++ch) { for (s = 0; s < 16; ++s) { for (v = 0; v < 8; ++v) { synth->filter[ch][0][0][s][v] = synth->filter[ch][0][1][s][v] = synth->filter[ch][1][0][s][v] = synth->filter[ch][1][1][s][v] = 0; } } } } /* * An optional optimization called here the Subband Synthesis Optimization * (SSO) improves the performance of subband synthesis at the expense of * accuracy. * * The idea is to simplify 32x32->64-bit multiplication to 32x32->32 such * that extra scaling and rounding are not necessary. This often allows the * compiler to use faster 32-bit multiply-accumulate instructions instead of * explicit 64-bit multiply, shift, and add instructions. 
* * SSO works like this: a full 32x32->64-bit multiply of two mad_fixed_t * values requires the result to be right-shifted 28 bits to be properly * scaled to the same fixed-point format. Right shifts can be applied at any * time to either operand or to the result, so the optimization involves * careful placement of these shifts to minimize the loss of accuracy. * * First, a 14-bit shift is applied with rounding at compile-time to the D[] * table of coefficients for the subband synthesis window. This only loses 2 * bits of accuracy because the lower 12 bits are always zero. A second * 12-bit shift occurs after the DCT calculation. This loses 12 bits of * accuracy. Finally, a third 2-bit shift occurs just before the sample is * saved in the PCM buffer. 14 + 12 + 2 == 28 bits. */ /* FPM_DEFAULT without OPT_SSO will actually lose accuracy and performance */ # if defined(FPM_DEFAULT) && !defined(OPT_SSO) # define OPT_SSO # endif /* second SSO shift, with rounding */ # if defined(OPT_SSO) # define SHIFT(x) (((x) + (1L << 11)) >> 12) # else # define SHIFT(x) (x) # endif /* possible DCT speed optimization */ # if defined(OPT_SPEED) && defined(MAD_F_MLX) # define OPT_DCTO # define MUL(x, y) \ ({ mad_fixed64hi_t hi; \ mad_fixed64lo_t lo; \ MAD_F_MLX(hi, lo, (x), (y)); \ hi << (32 - MAD_F_SCALEBITS - 3); \ }) # else # undef OPT_DCTO # define MUL(x, y) mad_f_mul((x), (y)) # endif /* * NAME: dct32() * DESCRIPTION: perform fast in[32]->out[32] DCT */ static void dct32(mad_fixed_t const in[32], unsigned int slot, mad_fixed_t lo[16][8], mad_fixed_t hi[16][8]) { mad_fixed_t t0, t1, t2, t3, t4, t5, t6, t7; mad_fixed_t t8, t9, t10, t11, t12, t13, t14, t15; mad_fixed_t t16, t17, t18, t19, t20, t21, t22, t23; mad_fixed_t t24, t25, t26, t27, t28, t29, t30, t31; mad_fixed_t t32, t33, t34, t35, t36, t37, t38, t39; mad_fixed_t t40, t41, t42, t43, t44, t45, t46, t47; mad_fixed_t t48, t49, t50, t51, t52, t53, t54, t55; mad_fixed_t t56, t57, t58, t59, t60, t61, t62, t63; mad_fixed_t t64, t65, 
t66, t67, t68, t69, t70, t71; mad_fixed_t t72, t73, t74, t75, t76, t77, t78, t79; mad_fixed_t t80, t81, t82, t83, t84, t85, t86, t87; mad_fixed_t t88, t89, t90, t91, t92, t93, t94, t95; mad_fixed_t t96, t97, t98, t99, t100, t101, t102, t103; mad_fixed_t t104, t105, t106, t107, t108, t109, t110, t111; mad_fixed_t t112, t113, t114, t115, t116, t117, t118, t119; mad_fixed_t t120, t121, t122, t123, t124, t125, t126, t127; mad_fixed_t t128, t129, t130, t131, t132, t133, t134, t135; mad_fixed_t t136, t137, t138, t139, t140, t141, t142, t143; mad_fixed_t t144, t145, t146, t147, t148, t149, t150, t151; mad_fixed_t t152, t153, t154, t155, t156, t157, t158, t159; mad_fixed_t t160, t161, t162, t163, t164, t165, t166, t167; mad_fixed_t t168, t169, t170, t171, t172, t173, t174, t175; mad_fixed_t t176; /* costab[i] = cos(PI / (2 * 32) * i) */ # if defined(OPT_DCTO) # define costab1 MAD_F(0x7fd8878e) # define costab2 MAD_F(0x7f62368f) # define costab3 MAD_F(0x7e9d55fc) # define costab4 MAD_F(0x7d8a5f40) # define costab5 MAD_F(0x7c29fbee) # define costab6 MAD_F(0x7a7d055b) # define costab7 MAD_F(0x78848414) # define costab8 MAD_F(0x7641af3d) # define costab9 MAD_F(0x73b5ebd1) # define costab10 MAD_F(0x70e2cbc6) # define costab11 MAD_F(0x6dca0d14) # define costab12 MAD_F(0x6a6d98a4) # define costab13 MAD_F(0x66cf8120) # define costab14 MAD_F(0x62f201ac) # define costab15 MAD_F(0x5ed77c8a) # define costab16 MAD_F(0x5a82799a) # define costab17 MAD_F(0x55f5a4d2) # define costab18 MAD_F(0x5133cc94) # define costab19 MAD_F(0x4c3fdff4) # define costab20 MAD_F(0x471cece7) # define costab21 MAD_F(0x41ce1e65) # define costab22 MAD_F(0x3c56ba70) # define costab23 MAD_F(0x36ba2014) # define costab24 MAD_F(0x30fbc54d) # define costab25 MAD_F(0x2b1f34eb) # define costab26 MAD_F(0x25280c5e) # define costab27 MAD_F(0x1f19f97b) # define costab28 MAD_F(0x18f8b83c) # define costab29 MAD_F(0x12c8106f) # define costab30 MAD_F(0x0c8bd35e) # define costab31 MAD_F(0x0647d97c) # else # define costab1 
MAD_F(0x0ffb10f2) /* 0.998795456 */ # define costab2 MAD_F(0x0fec46d2) /* 0.995184727 */ # define costab3 MAD_F(0x0fd3aac0) /* 0.989176510 */ # define costab4 MAD_F(0x0fb14be8) /* 0.980785280 */ # define costab5 MAD_F(0x0f853f7e) /* 0.970031253 */ # define costab6 MAD_F(0x0f4fa0ab) /* 0.956940336 */ # define costab7 MAD_F(0x0f109082) /* 0.941544065 */ # define costab8 MAD_F(0x0ec835e8) /* 0.923879533 */ # define costab9 MAD_F(0x0e76bd7a) /* 0.903989293 */ # define costab10 MAD_F(0x0e1c5979) /* 0.881921264 */ # define costab11 MAD_F(0x0db941a3) /* 0.857728610 */ # define costab12 MAD_F(0x0d4db315) /* 0.831469612 */ # define costab13 MAD_F(0x0cd9f024) /* 0.803207531 */ # define costab14 MAD_F(0x0c5e4036) /* 0.773010453 */ # define costab15 MAD_F(0x0bdaef91) /* 0.740951125 */ # define costab16 MAD_F(0x0b504f33) /* 0.707106781 */ # define costab17 MAD_F(0x0abeb49a) /* 0.671558955 */ # define costab18 MAD_F(0x0a267993) /* 0.634393284 */ # define costab19 MAD_F(0x0987fbfe) /* 0.595699304 */ # define costab20 MAD_F(0x08e39d9d) /* 0.555570233 */ # define costab21 MAD_F(0x0839c3cd) /* 0.514102744 */ # define costab22 MAD_F(0x078ad74e) /* 0.471396737 */ # define costab23 MAD_F(0x06d74402) /* 0.427555093 */ # define costab24 MAD_F(0x061f78aa) /* 0.382683432 */ # define costab25 MAD_F(0x0563e69d) /* 0.336889853 */ # define costab26 MAD_F(0x04a5018c) /* 0.290284677 */ # define costab27 MAD_F(0x03e33f2f) /* 0.242980180 */ # define costab28 MAD_F(0x031f1708) /* 0.195090322 */ # define costab29 MAD_F(0x0259020e) /* 0.146730474 */ # define costab30 MAD_F(0x01917a6c) /* 0.098017140 */ # define costab31 MAD_F(0x00c8fb30) /* 0.049067674 */ # endif t0 = in[0] + in[31]; t16 = MUL(in[0] - in[31], costab1); t1 = in[15] + in[16]; t17 = MUL(in[15] - in[16], costab31); t41 = t16 + t17; t59 = MUL(t16 - t17, costab2); t33 = t0 + t1; t50 = MUL(t0 - t1, costab2); t2 = in[7] + in[24]; t18 = MUL(in[7] - in[24], costab15); t3 = in[8] + in[23]; t19 = MUL(in[8] - in[23], costab17); t42 = t18 + t19; 
t60 = MUL(t18 - t19, costab30); t34 = t2 + t3; t51 = MUL(t2 - t3, costab30); t4 = in[3] + in[28]; t20 = MUL(in[3] - in[28], costab7); t5 = in[12] + in[19]; t21 = MUL(in[12] - in[19], costab25); t43 = t20 + t21; t61 = MUL(t20 - t21, costab14); t35 = t4 + t5; t52 = MUL(t4 - t5, costab14); t6 = in[4] + in[27]; t22 = MUL(in[4] - in[27], costab9); t7 = in[11] + in[20]; t23 = MUL(in[11] - in[20], costab23); t44 = t22 + t23; t62 = MUL(t22 - t23, costab18); t36 = t6 + t7; t53 = MUL(t6 - t7, costab18); t8 = in[1] + in[30]; t24 = MUL(in[1] - in[30], costab3); t9 = in[14] + in[17]; t25 = MUL(in[14] - in[17], costab29); t45 = t24 + t25; t63 = MUL(t24 - t25, costab6); t37 = t8 + t9; t54 = MUL(t8 - t9, costab6); t10 = in[6] + in[25]; t26 = MUL(in[6] - in[25], costab13); t11 = in[9] + in[22]; t27 = MUL(in[9] - in[22], costab19); t46 = t26 + t27; t64 = MUL(t26 - t27, costab26); t38 = t10 + t11; t55 = MUL(t10 - t11, costab26); t12 = in[2] + in[29]; t28 = MUL(in[2] - in[29], costab5); t13 = in[13] + in[18]; t29 = MUL(in[13] - in[18], costab27); t47 = t28 + t29; t65 = MUL(t28 - t29, costab10); t39 = t12 + t13; t56 = MUL(t12 - t13, costab10); t14 = in[5] + in[26]; t30 = MUL(in[5] - in[26], costab11); t15 = in[10] + in[21]; t31 = MUL(in[10] - in[21], costab21); t48 = t30 + t31; t66 = MUL(t30 - t31, costab22); t40 = t14 + t15; t57 = MUL(t14 - t15, costab22); t69 = t33 + t34; t89 = MUL(t33 - t34, costab4); t70 = t35 + t36; t90 = MUL(t35 - t36, costab28); t71 = t37 + t38; t91 = MUL(t37 - t38, costab12); t72 = t39 + t40; t92 = MUL(t39 - t40, costab20); t73 = t41 + t42; t94 = MUL(t41 - t42, costab4); t74 = t43 + t44; t95 = MUL(t43 - t44, costab28); t75 = t45 + t46; t96 = MUL(t45 - t46, costab12); t76 = t47 + t48; t97 = MUL(t47 - t48, costab20); t78 = t50 + t51; t100 = MUL(t50 - t51, costab4); t79 = t52 + t53; t101 = MUL(t52 - t53, costab28); t80 = t54 + t55; t102 = MUL(t54 - t55, costab12); t81 = t56 + t57; t103 = MUL(t56 - t57, costab20); t83 = t59 + t60; t106 = MUL(t59 - t60, costab4); 
t84 = t61 + t62; t107 = MUL(t61 - t62, costab28); t85 = t63 + t64; t108 = MUL(t63 - t64, costab12); t86 = t65 + t66; t109 = MUL(t65 - t66, costab20); t113 = t69 + t70; t114 = t71 + t72; /* 0 */ hi[15][slot] = SHIFT(t113 + t114); /* 16 */ lo[ 0][slot] = SHIFT(MUL(t113 - t114, costab16)); t115 = t73 + t74; t116 = t75 + t76; t32 = t115 + t116; /* 1 */ hi[14][slot] = SHIFT(t32); t118 = t78 + t79; t119 = t80 + t81; t58 = t118 + t119; /* 2 */ hi[13][slot] = SHIFT(t58); t121 = t83 + t84; t122 = t85 + t86; t67 = t121 + t122; t49 = (t67 * 2) - t32; /* 3 */ hi[12][slot] = SHIFT(t49); t125 = t89 + t90; t126 = t91 + t92; t93 = t125 + t126; /* 4 */ hi[11][slot] = SHIFT(t93); t128 = t94 + t95; t129 = t96 + t97; t98 = t128 + t129; t68 = (t98 * 2) - t49; /* 5 */ hi[10][slot] = SHIFT(t68); t132 = t100 + t101; t133 = t102 + t103; t104 = t132 + t133; t82 = (t104 * 2) - t58; /* 6 */ hi[ 9][slot] = SHIFT(t82); t136 = t106 + t107; t137 = t108 + t109; t110 = t136 + t137; t87 = (t110 * 2) - t67; t77 = (t87 * 2) - t68; /* 7 */ hi[ 8][slot] = SHIFT(t77); t141 = MUL(t69 - t70, costab8); t142 = MUL(t71 - t72, costab24); t143 = t141 + t142; /* 8 */ hi[ 7][slot] = SHIFT(t143); /* 24 */ lo[ 8][slot] = SHIFT((MUL(t141 - t142, costab16) * 2) - t143); t144 = MUL(t73 - t74, costab8); t145 = MUL(t75 - t76, costab24); t146 = t144 + t145; t88 = (t146 * 2) - t77; /* 9 */ hi[ 6][slot] = SHIFT(t88); t148 = MUL(t78 - t79, costab8); t149 = MUL(t80 - t81, costab24); t150 = t148 + t149; t105 = (t150 * 2) - t82; /* 10 */ hi[ 5][slot] = SHIFT(t105); t152 = MUL(t83 - t84, costab8); t153 = MUL(t85 - t86, costab24); t154 = t152 + t153; t111 = (t154 * 2) - t87; t99 = (t111 * 2) - t88; /* 11 */ hi[ 4][slot] = SHIFT(t99); t157 = MUL(t89 - t90, costab8); t158 = MUL(t91 - t92, costab24); t159 = t157 + t158; t127 = (t159 * 2) - t93; /* 12 */ hi[ 3][slot] = SHIFT(t127); t160 = (MUL(t125 - t126, costab16) * 2) - t127; /* 20 */ lo[ 4][slot] = SHIFT(t160); /* 28 */ lo[12][slot] = SHIFT((((MUL(t157 - t158, costab16) * 2) - 
t159) * 2) - t160); t161 = MUL(t94 - t95, costab8); t162 = MUL(t96 - t97, costab24); t163 = t161 + t162; t130 = (t163 * 2) - t98; t112 = (t130 * 2) - t99; /* 13 */ hi[ 2][slot] = SHIFT(t112); t164 = (MUL(t128 - t129, costab16) * 2) - t130; t166 = MUL(t100 - t101, costab8); t167 = MUL(t102 - t103, costab24); t168 = t166 + t167; t134 = (t168 * 2) - t104; t120 = (t134 * 2) - t105; /* 14 */ hi[ 1][slot] = SHIFT(t120); t135 = (MUL(t118 - t119, costab16) * 2) - t120; /* 18 */ lo[ 2][slot] = SHIFT(t135); t169 = (MUL(t132 - t133, costab16) * 2) - t134; t151 = (t169 * 2) - t135; /* 22 */ lo[ 6][slot] = SHIFT(t151); t170 = (((MUL(t148 - t149, costab16) * 2) - t150) * 2) - t151; /* 26 */ lo[10][slot] = SHIFT(t170); /* 30 */ lo[14][slot] = SHIFT((((((MUL(t166 - t167, costab16) * 2) - t168) * 2) - t169) * 2) - t170); t171 = MUL(t106 - t107, costab8); t172 = MUL(t108 - t109, costab24); t173 = t171 + t172; t138 = (t173 * 2) - t110; t123 = (t138 * 2) - t111; t139 = (MUL(t121 - t122, costab16) * 2) - t123; t117 = (t123 * 2) - t112; /* 15 */ hi[ 0][slot] = SHIFT(t117); t124 = (MUL(t115 - t116, costab16) * 2) - t117; /* 17 */ lo[ 1][slot] = SHIFT(t124); t131 = (t139 * 2) - t124; /* 19 */ lo[ 3][slot] = SHIFT(t131); t140 = (t164 * 2) - t131; /* 21 */ lo[ 5][slot] = SHIFT(t140); t174 = (MUL(t136 - t137, costab16) * 2) - t138; t155 = (t174 * 2) - t139; t147 = (t155 * 2) - t140; /* 23 */ lo[ 7][slot] = SHIFT(t147); t156 = (((MUL(t144 - t145, costab16) * 2) - t146) * 2) - t147; /* 25 */ lo[ 9][slot] = SHIFT(t156); t175 = (((MUL(t152 - t153, costab16) * 2) - t154) * 2) - t155; t165 = (t175 * 2) - t156; /* 27 */ lo[11][slot] = SHIFT(t165); t176 = (((((MUL(t161 - t162, costab16) * 2) - t163) * 2) - t164) * 2) - t165; /* 29 */ lo[13][slot] = SHIFT(t176); /* 31 */ lo[15][slot] = SHIFT((((((((MUL(t171 - t172, costab16) * 2) - t173) * 2) - t174) * 2) - t175) * 2) - t176); /* * Totals: * 80 multiplies * 80 additions * 119 subtractions * 49 shifts (not counting SSO) */ } # undef MUL # undef SHIFT 
/* third SSO shift and/or D[] optimization preshift */ # if defined(OPT_SSO) # if MAD_F_FRACBITS != 28 # error "MAD_F_FRACBITS must be 28 to use OPT_SSO" # endif # define ML0(hi, lo, x, y) ((lo) = (x) * (y)) # define MLA(hi, lo, x, y) ((lo) += (x) * (y)) # define MLN(hi, lo) ((lo) = -(lo)) # define MLZ(hi, lo) ((void) (hi), (mad_fixed_t) (lo)) # define SHIFT(x) ((x) >> 2) # define PRESHIFT(x) ((MAD_F(x) + (1L << 13)) >> 14) # else # define ML0(hi, lo, x, y) MAD_F_ML0((hi), (lo), (x), (y)) # define MLA(hi, lo, x, y) MAD_F_MLA((hi), (lo), (x), (y)) # define MLN(hi, lo) MAD_F_MLN((hi), (lo)) # define MLZ(hi, lo) MAD_F_MLZ((hi), (lo)) # define SHIFT(x) (x) # if defined(MAD_F_SCALEBITS) # undef MAD_F_SCALEBITS # define MAD_F_SCALEBITS (MAD_F_FRACBITS - 12) # define PRESHIFT(x) (MAD_F(x) >> 12) # else # define PRESHIFT(x) MAD_F(x) # endif # endif static mad_fixed_t const D[17][32] = { # include "D.dat" }; # if defined(ASO_SYNTH) void synth_full(struct mad_synth *, struct mad_frame const *, unsigned int, unsigned int); # else /* * NAME: synth->full() * DESCRIPTION: perform full frequency PCM synthesis */ static void synth_full(struct mad_synth *synth, struct mad_frame const *frame, unsigned int nch, unsigned int ns) { unsigned int phase, ch, s, sb, pe, po; mad_fixed_t *pcm1, *pcm2, (*filter)[2][2][16][8]; mad_fixed_t const (*sbsample)[36][32]; register mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8]; register mad_fixed_t const (*Dptr)[32], *ptr; register mad_fixed64hi_t hi; register mad_fixed64lo_t lo; for (ch = 0; ch < nch; ++ch) { sbsample = &frame->sbsample[ch]; filter = &synth->filter[ch]; phase = synth->phase; pcm1 = synth->pcm.samples[ch]; for (s = 0; s < ns; ++s) { dct32((*sbsample)[s], phase >> 1, (*filter)[0][phase & 1], (*filter)[1][phase & 1]); pe = phase & ~1; po = ((phase - 1) & 0xf) | 1; /* calculate 32 samples */ fe = &(*filter)[0][ phase & 1][0]; fx = &(*filter)[0][~phase & 1][0]; fo = &(*filter)[1][~phase & 1][0]; Dptr = &D[0]; ptr = *Dptr + po; ML0(hi, lo, 
(*fx)[0], ptr[ 0]); MLA(hi, lo, (*fx)[1], ptr[14]); MLA(hi, lo, (*fx)[2], ptr[12]); MLA(hi, lo, (*fx)[3], ptr[10]); MLA(hi, lo, (*fx)[4], ptr[ 8]); MLA(hi, lo, (*fx)[5], ptr[ 6]); MLA(hi, lo, (*fx)[6], ptr[ 4]); MLA(hi, lo, (*fx)[7], ptr[ 2]); MLN(hi, lo); ptr = *Dptr + pe; MLA(hi, lo, (*fe)[0], ptr[ 0]); MLA(hi, lo, (*fe)[1], ptr[14]); MLA(hi, lo, (*fe)[2], ptr[12]); MLA(hi, lo, (*fe)[3], ptr[10]); MLA(hi, lo, (*fe)[4], ptr[ 8]); MLA(hi, lo, (*fe)[5], ptr[ 6]); MLA(hi, lo, (*fe)[6], ptr[ 4]); MLA(hi, lo, (*fe)[7], ptr[ 2]); *pcm1++ = SHIFT(MLZ(hi, lo)); pcm2 = pcm1 + 30; for (sb = 1; sb < 16; ++sb) { ++fe; ++Dptr; /* D[32 - sb][i] == -D[sb][31 - i] */ ptr = *Dptr + po; ML0(hi, lo, (*fo)[0], ptr[ 0]); MLA(hi, lo, (*fo)[1], ptr[14]); MLA(hi, lo, (*fo)[2], ptr[12]); MLA(hi, lo, (*fo)[3], ptr[10]); MLA(hi, lo, (*fo)[4], ptr[ 8]); MLA(hi, lo, (*fo)[5], ptr[ 6]); MLA(hi, lo, (*fo)[6], ptr[ 4]); MLA(hi, lo, (*fo)[7], ptr[ 2]); MLN(hi, lo); ptr = *Dptr + pe; MLA(hi, lo, (*fe)[7], ptr[ 2]); MLA(hi, lo, (*fe)[6], ptr[ 4]); MLA(hi, lo, (*fe)[5], ptr[ 6]); MLA(hi, lo, (*fe)[4], ptr[ 8]); MLA(hi, lo, (*fe)[3], ptr[10]); MLA(hi, lo, (*fe)[2], ptr[12]); MLA(hi, lo, (*fe)[1], ptr[14]); MLA(hi, lo, (*fe)[0], ptr[ 0]); *pcm1++ = SHIFT(MLZ(hi, lo)); ptr = *Dptr - pe; ML0(hi, lo, (*fe)[0], ptr[31 - 16]); MLA(hi, lo, (*fe)[1], ptr[31 - 14]); MLA(hi, lo, (*fe)[2], ptr[31 - 12]); MLA(hi, lo, (*fe)[3], ptr[31 - 10]); MLA(hi, lo, (*fe)[4], ptr[31 - 8]); MLA(hi, lo, (*fe)[5], ptr[31 - 6]); MLA(hi, lo, (*fe)[6], ptr[31 - 4]); MLA(hi, lo, (*fe)[7], ptr[31 - 2]); ptr = *Dptr - po; MLA(hi, lo, (*fo)[7], ptr[31 - 2]); MLA(hi, lo, (*fo)[6], ptr[31 - 4]); MLA(hi, lo, (*fo)[5], ptr[31 - 6]); MLA(hi, lo, (*fo)[4], ptr[31 - 8]); MLA(hi, lo, (*fo)[3], ptr[31 - 10]); MLA(hi, lo, (*fo)[2], ptr[31 - 12]); MLA(hi, lo, (*fo)[1], ptr[31 - 14]); MLA(hi, lo, (*fo)[0], ptr[31 - 16]); *pcm2-- = SHIFT(MLZ(hi, lo)); ++fo; } ++Dptr; ptr = *Dptr + po; ML0(hi, lo, (*fo)[0], ptr[ 0]); MLA(hi, lo, (*fo)[1], ptr[14]); 
MLA(hi, lo, (*fo)[2], ptr[12]); MLA(hi, lo, (*fo)[3], ptr[10]); MLA(hi, lo, (*fo)[4], ptr[ 8]); MLA(hi, lo, (*fo)[5], ptr[ 6]); MLA(hi, lo, (*fo)[6], ptr[ 4]); MLA(hi, lo, (*fo)[7], ptr[ 2]); *pcm1 = SHIFT(-MLZ(hi, lo)); pcm1 += 16; phase = (phase + 1) % 16; } } } # endif /* * NAME: synth->half() * DESCRIPTION: perform half frequency PCM synthesis */ static void synth_half(struct mad_synth *synth, struct mad_frame const *frame, unsigned int nch, unsigned int ns) { unsigned int phase, ch, s, sb, pe, po; mad_fixed_t *pcm1, *pcm2, (*filter)[2][2][16][8]; mad_fixed_t const (*sbsample)[36][32]; register mad_fixed_t (*fe)[8], (*fx)[8], (*fo)[8]; register mad_fixed_t const (*Dptr)[32], *ptr; register mad_fixed64hi_t hi; register mad_fixed64lo_t lo; for (ch = 0; ch < nch; ++ch) { sbsample = &frame->sbsample[ch]; filter = &synth->filter[ch]; phase = synth->phase; pcm1 = synth->pcm.samples[ch]; for (s = 0; s < ns; ++s) { dct32((*sbsample)[s], phase >> 1, (*filter)[0][phase & 1], (*filter)[1][phase & 1]); pe = phase & ~1; po = ((phase - 1) & 0xf) | 1; /* calculate 16 samples */ fe = &(*filter)[0][ phase & 1][0]; fx = &(*filter)[0][~phase & 1][0]; fo = &(*filter)[1][~phase & 1][0]; Dptr = &D[0]; ptr = *Dptr + po; ML0(hi, lo, (*fx)[0], ptr[ 0]); MLA(hi, lo, (*fx)[1], ptr[14]); MLA(hi, lo, (*fx)[2], ptr[12]); MLA(hi, lo, (*fx)[3], ptr[10]); MLA(hi, lo, (*fx)[4], ptr[ 8]); MLA(hi, lo, (*fx)[5], ptr[ 6]); MLA(hi, lo, (*fx)[6], ptr[ 4]); MLA(hi, lo, (*fx)[7], ptr[ 2]); MLN(hi, lo); ptr = *Dptr + pe; MLA(hi, lo, (*fe)[0], ptr[ 0]); MLA(hi, lo, (*fe)[1], ptr[14]); MLA(hi, lo, (*fe)[2], ptr[12]); MLA(hi, lo, (*fe)[3], ptr[10]); MLA(hi, lo, (*fe)[4], ptr[ 8]); MLA(hi, lo, (*fe)[5], ptr[ 6]); MLA(hi, lo, (*fe)[6], ptr[ 4]); MLA(hi, lo, (*fe)[7], ptr[ 2]); *pcm1++ = SHIFT(MLZ(hi, lo)); pcm2 = pcm1 + 14; for (sb = 1; sb < 16; ++sb) { ++fe; ++Dptr; /* D[32 - sb][i] == -D[sb][31 - i] */ if (!(sb & 1)) { ptr = *Dptr + po; ML0(hi, lo, (*fo)[0], ptr[ 0]); MLA(hi, lo, (*fo)[1], ptr[14]); 
MLA(hi, lo, (*fo)[2], ptr[12]); MLA(hi, lo, (*fo)[3], ptr[10]); MLA(hi, lo, (*fo)[4], ptr[ 8]); MLA(hi, lo, (*fo)[5], ptr[ 6]); MLA(hi, lo, (*fo)[6], ptr[ 4]); MLA(hi, lo, (*fo)[7], ptr[ 2]); MLN(hi, lo); ptr = *Dptr + pe; MLA(hi, lo, (*fe)[7], ptr[ 2]); MLA(hi, lo, (*fe)[6], ptr[ 4]); MLA(hi, lo, (*fe)[5], ptr[ 6]); MLA(hi, lo, (*fe)[4], ptr[ 8]); MLA(hi, lo, (*fe)[3], ptr[10]); MLA(hi, lo, (*fe)[2], ptr[12]); MLA(hi, lo, (*fe)[1], ptr[14]); MLA(hi, lo, (*fe)[0], ptr[ 0]); *pcm1++ = SHIFT(MLZ(hi, lo)); ptr = *Dptr - po; ML0(hi, lo, (*fo)[7], ptr[31 - 2]); MLA(hi, lo, (*fo)[6], ptr[31 - 4]); MLA(hi, lo, (*fo)[5], ptr[31 - 6]); MLA(hi, lo, (*fo)[4], ptr[31 - 8]); MLA(hi, lo, (*fo)[3], ptr[31 - 10]); MLA(hi, lo, (*fo)[2], ptr[31 - 12]); MLA(hi, lo, (*fo)[1], ptr[31 - 14]); MLA(hi, lo, (*fo)[0], ptr[31 - 16]); ptr = *Dptr - pe; MLA(hi, lo, (*fe)[0], ptr[31 - 16]); MLA(hi, lo, (*fe)[1], ptr[31 - 14]); MLA(hi, lo, (*fe)[2], ptr[31 - 12]); MLA(hi, lo, (*fe)[3], ptr[31 - 10]); MLA(hi, lo, (*fe)[4], ptr[31 - 8]); MLA(hi, lo, (*fe)[5], ptr[31 - 6]); MLA(hi, lo, (*fe)[6], ptr[31 - 4]); MLA(hi, lo, (*fe)[7], ptr[31 - 2]); *pcm2-- = SHIFT(MLZ(hi, lo)); } ++fo; } ++Dptr; ptr = *Dptr + po; ML0(hi, lo, (*fo)[0], ptr[ 0]); MLA(hi, lo, (*fo)[1], ptr[14]); MLA(hi, lo, (*fo)[2], ptr[12]); MLA(hi, lo, (*fo)[3], ptr[10]); MLA(hi, lo, (*fo)[4], ptr[ 8]); MLA(hi, lo, (*fo)[5], ptr[ 6]); MLA(hi, lo, (*fo)[6], ptr[ 4]); MLA(hi, lo, (*fo)[7], ptr[ 2]); *pcm1 = SHIFT(-MLZ(hi, lo)); pcm1 += 8; phase = (phase + 1) % 16; } } } /* * NAME: synth->frame() * DESCRIPTION: perform PCM synthesis of frame subband samples */ void mad_synth_frame(struct mad_synth *synth, struct mad_frame const *frame) { unsigned int nch, ns; void (*synth_frame)(struct mad_synth *, struct mad_frame const *, unsigned int, unsigned int); nch = MAD_NCHANNELS(&frame->header); ns = MAD_NSBSAMPLES(&frame->header); synth->pcm.samplerate = frame->header.samplerate; synth->pcm.channels = nch; synth->pcm.length = 32 * ns; 
synth_frame = synth_full; if (frame->options & MAD_OPTION_HALFSAMPLERATE) { synth->pcm.samplerate /= 2; synth->pcm.length /= 2; synth_frame = synth_half; } synth_frame(synth, frame, nch, ns); synth->phase = (synth->phase + ns) % 16; }
gpl-2.0
dorimanx/Dorimanx-SG2-I9100-Kernel
drivers/media/i2c/vp27smpx.c
533
5111
/* * vp27smpx - driver version 0.0.1 * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * * Based on a tvaudio patch from Takahiro Adachi <tadachi@tadachi-net.com> * and Kazuhiko Kawakami <kazz-0@mail.goo.ne.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> MODULE_DESCRIPTION("vp27smpx driver"); MODULE_AUTHOR("Hans Verkuil"); MODULE_LICENSE("GPL"); /* ----------------------------------------------------------------------- */ struct vp27smpx_state { struct v4l2_subdev sd; int radio; u32 audmode; }; static inline struct vp27smpx_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct vp27smpx_state, sd); } static void vp27smpx_set_audmode(struct v4l2_subdev *sd, u32 audmode) { struct vp27smpx_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u8 data[3] = { 0x00, 0x00, 0x04 }; switch (audmode) { case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_LANG1: break; case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1_LANG2: data[1] = 0x01; break; case V4L2_TUNER_MODE_LANG2: data[1] = 0x02; break; } if (i2c_master_send(client, data, sizeof(data)) != sizeof(data)) v4l2_err(sd, "I/O error 
setting audmode\n"); else state->audmode = audmode; } static int vp27smpx_s_radio(struct v4l2_subdev *sd) { struct vp27smpx_state *state = to_state(sd); state->radio = 1; return 0; } static int vp27smpx_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct vp27smpx_state *state = to_state(sd); state->radio = 0; return 0; } static int vp27smpx_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt) { struct vp27smpx_state *state = to_state(sd); if (!state->radio) vp27smpx_set_audmode(sd, vt->audmode); return 0; } static int vp27smpx_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct vp27smpx_state *state = to_state(sd); if (state->radio) return 0; vt->audmode = state->audmode; vt->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; vt->rxsubchans = V4L2_TUNER_SUB_MONO; return 0; } static int vp27smpx_log_status(struct v4l2_subdev *sd) { struct vp27smpx_state *state = to_state(sd); v4l2_info(sd, "Audio Mode: %u%s\n", state->audmode, state->radio ? 
" (Radio)" : ""); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops vp27smpx_core_ops = { .log_status = vp27smpx_log_status, .s_std = vp27smpx_s_std, }; static const struct v4l2_subdev_tuner_ops vp27smpx_tuner_ops = { .s_radio = vp27smpx_s_radio, .s_tuner = vp27smpx_s_tuner, .g_tuner = vp27smpx_g_tuner, }; static const struct v4l2_subdev_ops vp27smpx_ops = { .core = &vp27smpx_core_ops, .tuner = &vp27smpx_tuner_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ /* * Generic i2c probe * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1' */ static int vp27smpx_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct vp27smpx_state *state; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &vp27smpx_ops); state->audmode = V4L2_TUNER_MODE_STEREO; /* initialize vp27smpx */ vp27smpx_set_audmode(sd, state->audmode); return 0; } static int vp27smpx_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id vp27smpx_id[] = { { "vp27smpx", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, vp27smpx_id); static struct i2c_driver vp27smpx_driver = { .driver = { .owner = THIS_MODULE, .name = "vp27smpx", }, .probe = vp27smpx_probe, .remove = vp27smpx_remove, .id_table = vp27smpx_id, }; module_i2c_driver(vp27smpx_driver);
gpl-2.0
pcamarillor/linux
drivers/tty/serial/8250/8250_mtk.c
533
8319
/* * Mediatek 8250 driver. * * Copyright (c) 2014 MundoReader S.L. * Author: Matthias Brugger <matthias.bgg@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/serial_8250.h> #include <linux/serial_reg.h> #include "8250.h" #define UART_MTK_HIGHS 0x09 /* Highspeed register */ #define UART_MTK_SAMPLE_COUNT 0x0a /* Sample count register */ #define UART_MTK_SAMPLE_POINT 0x0b /* Sample point register */ #define MTK_UART_RATE_FIX 0x0d /* UART Rate Fix Register */ struct mtk8250_data { int line; struct clk *uart_clk; struct clk *bus_clk; }; static void mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { unsigned long flags; unsigned int baud, quot; struct uart_8250_port *up = container_of(port, struct uart_8250_port, port); serial8250_do_set_termios(port, termios, old); /* * Mediatek UARTs use an extra highspeed register (UART_MTK_HIGHS) * * We need to recalcualte the quot register, as the claculation depends * on the vaule in the highspeed register. * * Some baudrates are not supported by the chip, so we use the next * lower rate supported and update termios c_flag. * * If highspeed register is set to 3, we need to specify sample count * and sample point to increase accuracy. If not, we reset the * registers to their default values. 
*/ baud = uart_get_baud_rate(port, termios, old, port->uartclk / 16 / 0xffff, port->uartclk / 16); if (baud <= 115200) { serial_port_out(port, UART_MTK_HIGHS, 0x0); quot = uart_get_divisor(port, baud); } else if (baud <= 576000) { serial_port_out(port, UART_MTK_HIGHS, 0x2); /* Set to next lower baudrate supported */ if ((baud == 500000) || (baud == 576000)) baud = 460800; quot = DIV_ROUND_UP(port->uartclk, 4 * baud); } else { serial_port_out(port, UART_MTK_HIGHS, 0x3); /* Set to highest baudrate supported */ if (baud >= 1152000) baud = 921600; quot = DIV_ROUND_UP(port->uartclk, 256 * baud); } /* * Ok, we're now changing the port state. Do it with * interrupts disabled. */ spin_lock_irqsave(&port->lock, flags); /* set DLAB we have cval saved in up->lcr from the call to the core */ serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB); serial_dl_write(up, quot); /* reset DLAB */ serial_port_out(port, UART_LCR, up->lcr); if (baud > 460800) { unsigned int tmp; tmp = DIV_ROUND_CLOSEST(port->uartclk, quot * baud); serial_port_out(port, UART_MTK_SAMPLE_COUNT, tmp - 1); serial_port_out(port, UART_MTK_SAMPLE_POINT, (tmp - 2) >> 1); } else { serial_port_out(port, UART_MTK_SAMPLE_COUNT, 0x00); serial_port_out(port, UART_MTK_SAMPLE_POINT, 0xff); } spin_unlock_irqrestore(&port->lock, flags); /* Don't rewrite B0 */ if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, baud, baud); } static int mtk8250_runtime_suspend(struct device *dev) { struct mtk8250_data *data = dev_get_drvdata(dev); clk_disable_unprepare(data->uart_clk); clk_disable_unprepare(data->bus_clk); return 0; } static int mtk8250_runtime_resume(struct device *dev) { struct mtk8250_data *data = dev_get_drvdata(dev); int err; err = clk_prepare_enable(data->uart_clk); if (err) { dev_warn(dev, "Can't enable clock\n"); return err; } err = clk_prepare_enable(data->bus_clk); if (err) { dev_warn(dev, "Can't enable bus clock\n"); return err; } return 0; } static void mtk8250_do_pm(struct uart_port 
*port, unsigned int state, unsigned int old) { if (!state) pm_runtime_get_sync(port->dev); serial8250_do_pm(port, state, old); if (state) pm_runtime_put_sync_suspend(port->dev); } static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, struct mtk8250_data *data) { data->uart_clk = devm_clk_get(&pdev->dev, "baud"); if (IS_ERR(data->uart_clk)) { /* * For compatibility with older device trees try unnamed * clk when no baud clk can be found. */ data->uart_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(data->uart_clk)) { dev_warn(&pdev->dev, "Can't get uart clock\n"); return PTR_ERR(data->uart_clk); } return 0; } data->bus_clk = devm_clk_get(&pdev->dev, "bus"); if (IS_ERR(data->bus_clk)) return PTR_ERR(data->bus_clk); return 0; } static int mtk8250_probe(struct platform_device *pdev) { struct uart_8250_port uart = {}; struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); struct mtk8250_data *data; int err; if (!regs || !irq) { dev_err(&pdev->dev, "no registers/irq defined\n"); return -EINVAL; } uart.port.membase = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); if (!uart.port.membase) return -ENOMEM; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; if (pdev->dev.of_node) { err = mtk8250_probe_of(pdev, &uart.port, data); if (err) return err; } else return -ENODEV; spin_lock_init(&uart.port.lock); uart.port.mapbase = regs->start; uart.port.irq = irq->start; uart.port.pm = mtk8250_do_pm; uart.port.type = PORT_16550; uart.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT; uart.port.dev = &pdev->dev; uart.port.iotype = UPIO_MEM32; uart.port.regshift = 2; uart.port.private_data = data; uart.port.set_termios = mtk8250_set_termios; uart.port.uartclk = clk_get_rate(data->uart_clk); /* Disable Rate Fix function */ writel(0x0, uart.port.membase + (MTK_UART_RATE_FIX << uart.port.regshift)); platform_set_drvdata(pdev, 
data); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { err = mtk8250_runtime_resume(&pdev->dev); if (err) return err; } data->line = serial8250_register_8250_port(&uart); if (data->line < 0) return data->line; return 0; } static int mtk8250_remove(struct platform_device *pdev) { struct mtk8250_data *data = platform_get_drvdata(pdev); pm_runtime_get_sync(&pdev->dev); serial8250_unregister_port(data->line); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) mtk8250_runtime_suspend(&pdev->dev); return 0; } #ifdef CONFIG_PM_SLEEP static int mtk8250_suspend(struct device *dev) { struct mtk8250_data *data = dev_get_drvdata(dev); serial8250_suspend_port(data->line); return 0; } static int mtk8250_resume(struct device *dev) { struct mtk8250_data *data = dev_get_drvdata(dev); serial8250_resume_port(data->line); return 0; } #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops mtk8250_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(mtk8250_suspend, mtk8250_resume) SET_RUNTIME_PM_OPS(mtk8250_runtime_suspend, mtk8250_runtime_resume, NULL) }; static const struct of_device_id mtk8250_of_match[] = { { .compatible = "mediatek,mt6577-uart" }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, mtk8250_of_match); static struct platform_driver mtk8250_platform_driver = { .driver = { .name = "mt6577-uart", .pm = &mtk8250_pm_ops, .of_match_table = mtk8250_of_match, }, .probe = mtk8250_probe, .remove = mtk8250_remove, }; module_platform_driver(mtk8250_platform_driver); #ifdef CONFIG_SERIAL_8250_CONSOLE static int __init early_mtk8250_setup(struct earlycon_device *device, const char *options) { if (!device->port.membase) return -ENODEV; device->port.iotype = UPIO_MEM32; return early_serial8250_setup(device, NULL); } OF_EARLYCON_DECLARE(mtk8250, "mediatek,mt6577-uart", early_mtk8250_setup); #endif MODULE_AUTHOR("Matthias Brugger"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Mediatek 8250 serial port driver");
gpl-2.0
piercexue/linux-3.10-ltsi
drivers/net/usb/sr9700.c
789
13208
/* * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices * * Author : Liu Junliang <liujunliang_ljl@163.com> * * Based on dm9601.c * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/stddef.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/usb/usbnet.h> #include "sr9700.h" static int sr_read(struct usbnet *dev, u8 reg, u16 length, void *data) { int err; err = usbnet_read_cmd(dev, SR_RD_REGS, SR_REQ_RD_REG, 0, reg, data, length); if ((err != length) && (err >= 0)) err = -EINVAL; return err; } static int sr_write(struct usbnet *dev, u8 reg, u16 length, void *data) { int err; err = usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data, length); if ((err >= 0) && (err < length)) err = -EINVAL; return err; } static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value) { return sr_read(dev, reg, 1, value); } static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value) { return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, value, reg, NULL, 0); } static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) { usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data, length); } static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value) { usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG, value, reg, NULL, 0); } static int wait_phy_eeprom_ready(struct usbnet *dev, int phy) { int i; for (i = 0; i < SR_SHARE_TIMEOUT; i++) { u8 tmp = 0; int ret; udelay(1); ret = sr_read_reg(dev, EPCR, &tmp); if (ret < 0) return ret; /* ready */ if (!(tmp & EPCR_ERRE)) return 0; } netdev_err(dev->net, "%s write timed out!\n", phy ? 
"phy" : "eeprom"); return -EIO; } static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg, __le16 *value) { int ret; mutex_lock(&dev->phy_mutex); sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR); ret = wait_phy_eeprom_ready(dev, phy); if (ret < 0) goto out_unlock; sr_write_reg(dev, EPCR, 0x0); ret = sr_read(dev, EPDR, 2, value); netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n", phy, reg, *value, ret); out_unlock: mutex_unlock(&dev->phy_mutex); return ret; } static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg, __le16 value) { int ret; mutex_lock(&dev->phy_mutex); ret = sr_write(dev, EPDR, 2, &value); if (ret < 0) goto out_unlock; sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg); sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) : (EPCR_WEP | EPCR_ERPRW)); ret = wait_phy_eeprom_ready(dev, phy); if (ret < 0) goto out_unlock; sr_write_reg(dev, EPCR, 0x0); out_unlock: mutex_unlock(&dev->phy_mutex); return ret; } static int sr_read_eeprom_word(struct usbnet *dev, u8 offset, void *value) { return sr_share_read_word(dev, 0, offset, value); } static int sr9700_get_eeprom_len(struct net_device *netdev) { return SR_EEPROM_LEN; } static int sr9700_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *data) { struct usbnet *dev = netdev_priv(netdev); __le16 *buf = (__le16 *)data; int ret = 0; int i; /* access is 16bit */ if ((eeprom->offset & 0x01) || (eeprom->len & 0x01)) return -EINVAL; for (i = 0; i < eeprom->len / 2; i++) { ret = sr_read_eeprom_word(dev, eeprom->offset / 2 + i, buf + i); if (ret < 0) break; } return ret; } static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); __le16 res; int rc = 0; if (phy_id) { netdev_dbg(netdev, "Only internal phy supported\n"); return 0; } /* Access NSR_LINKST bit for link status instead of MII_BMSR */ if (loc == 
MII_BMSR) { u8 value; sr_read_reg(dev, NSR, &value); if (value & NSR_LINKST) rc = 1; } sr_share_read_word(dev, 1, loc, &res); if (rc == 1) res = le16_to_cpu(res) | BMSR_LSTATUS; else res = le16_to_cpu(res) & ~BMSR_LSTATUS; netdev_dbg(netdev, "sr_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", phy_id, loc, res); return res; } static void sr_mdio_write(struct net_device *netdev, int phy_id, int loc, int val) { struct usbnet *dev = netdev_priv(netdev); __le16 res = cpu_to_le16(val); if (phy_id) { netdev_dbg(netdev, "Only internal phy supported\n"); return; } netdev_dbg(netdev, "sr_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", phy_id, loc, val); sr_share_write_word(dev, 1, loc, res); } static u32 sr9700_get_link(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); u8 value = 0; int rc = 0; /* Get the Link Status directly */ sr_read_reg(dev, NSR, &value); if (value & NSR_LINKST) rc = 1; return rc; } static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(netdev); return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static const struct ethtool_ops sr9700_ethtool_ops = { .get_drvinfo = usbnet_get_drvinfo, .get_link = sr9700_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_eeprom_len = sr9700_get_eeprom_len, .get_eeprom = sr9700_get_eeprom, .get_settings = usbnet_get_settings, .set_settings = usbnet_set_settings, .nway_reset = usbnet_nway_reset, }; static void sr9700_set_multicast(struct net_device *netdev) { struct usbnet *dev = netdev_priv(netdev); /* We use the 20 byte dev->data for our 8 byte filter buffer * to avoid allocating memory that is tricky to free later */ u8 *hashes = (u8 *)&dev->data; /* rx_ctl setting : enable, disable_long, disable_crc */ u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG; memset(hashes, 0x00, SR_MCAST_SIZE); /* broadcast address */ hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG; if (netdev->flags 
& IFF_PROMISC) { rx_ctl |= RCR_PRMSC; } else if (netdev->flags & IFF_ALLMULTI || netdev_mc_count(netdev) > SR_MCAST_MAX) { rx_ctl |= RCR_RUNT; } else if (!netdev_mc_empty(netdev)) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, netdev) { u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26; hashes[crc >> 3] |= 1 << (crc & 0x7); } } sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes); sr_write_reg_async(dev, RCR, rx_ctl); } static int sr9700_set_mac_address(struct net_device *netdev, void *p) { struct usbnet *dev = netdev_priv(netdev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) { netdev_err(netdev, "not setting invalid mac address %pM\n", addr->sa_data); return -EINVAL; } memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); sr_write_async(dev, PAR, 6, netdev->dev_addr); return 0; } static const struct net_device_ops sr9700_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = sr9700_ioctl, .ndo_set_rx_mode = sr9700_set_multicast, .ndo_set_mac_address = sr9700_set_mac_address, }; static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf) { struct net_device *netdev; struct mii_if_info *mii; int ret; ret = usbnet_get_endpoints(dev, intf); if (ret) goto out; netdev = dev->net; netdev->netdev_ops = &sr9700_netdev_ops; netdev->ethtool_ops = &sr9700_ethtool_ops; netdev->hard_header_len += SR_TX_OVERHEAD; dev->hard_mtu = netdev->mtu + netdev->hard_header_len; /* bulkin buffer is preferably not less than 3K */ dev->rx_urb_size = 3072; mii = &dev->mii; mii->dev = netdev; mii->mdio_read = sr_mdio_read; mii->mdio_write = sr_mdio_write; mii->phy_id_mask = 0x1f; mii->reg_num_mask = 0x1f; sr_write_reg(dev, NCR, NCR_RST); udelay(20); /* read MAC * After Chip Power on, the Chip will reload the MAC from * EEPROM automatically to PAR. 
In case there is no EEPROM externally, * a default MAC address is stored in PAR for making chip work properly. */ if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) { netdev_err(netdev, "Error reading MAC address\n"); ret = -ENODEV; goto out; } /* power up and reset phy */ sr_write_reg(dev, PRR, PRR_PHY_RST); /* at least 10ms, here 20ms for safe */ mdelay(20); sr_write_reg(dev, PRR, 0); /* at least 1ms, here 2ms for reading right register */ udelay(2 * 1000); /* receive broadcast packets */ sr9700_set_multicast(netdev); sr_mdio_write(netdev, mii->phy_id, MII_BMCR, BMCR_RESET); sr_mdio_write(netdev, mii->phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP); mii_nway_restart(mii); out: return ret; } static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct sk_buff *sr_skb; int len; /* skb content (packets) format : * p0 p1 p2 ...... pm * / \ * / \ * / \ * / \ * p0b0 p0b1 p0b2 p0b3 ...... p0b(n-4) p0b(n-3)...p0bn * * p0 : packet 0 * p0b0 : packet 0 byte 0 * * b0: rx status * b1: packet length (incl crc) low * b2: packet length (incl crc) high * b3..n-4: packet data * bn-3..bn: ethernet packet crc */ if (unlikely(skb->len < SR_RX_OVERHEAD)) { netdev_err(dev->net, "unexpected tiny rx frame\n"); return 0; } /* one skb may contains multiple packets */ while (skb->len > SR_RX_OVERHEAD) { if (skb->data[0] != 0x40) return 0; /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; if (len > ETH_FRAME_LEN) return 0; /* the last packet of current skb */ if (skb->len == (len + SR_RX_OVERHEAD)) { skb_pull(skb, 3); skb->len = len; skb_set_tail_pointer(skb, len); skb->truesize = len + sizeof(struct sk_buff); return 2; } /* skb_clone is used for address align */ sr_skb = skb_clone(skb, GFP_ATOMIC); if (!sr_skb) return 0; sr_skb->len = len; sr_skb->data = skb->data + 3; skb_set_tail_pointer(sr_skb, len); sr_skb->truesize = len + sizeof(struct sk_buff); usbnet_skb_return(dev, sr_skb); skb_pull(skb, len + 
SR_RX_OVERHEAD); }; return 0; } static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int len; /* SR9700 can only send out one ethernet packet at once. * * b0 b1 b2 b3 ...... b(n-4) b(n-3)...bn * * b0: rx status * b1: packet length (incl crc) low * b2: packet length (incl crc) high * b3..n-4: packet data * bn-3..bn: ethernet packet crc */ len = skb->len; if (skb_headroom(skb) < SR_TX_OVERHEAD) { struct sk_buff *skb2; skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return NULL; } __skb_push(skb, SR_TX_OVERHEAD); /* usbnet adds padding if length is a multiple of packet size * if so, adjust length value in header */ if ((skb->len % dev->maxpacket) == 0) len++; skb->data[0] = len; skb->data[1] = len >> 8; return skb; } static void sr9700_status(struct usbnet *dev, struct urb *urb) { int link; u8 *buf; /* format: b0: net status b1: tx status 1 b2: tx status 2 b3: rx status b4: rx overflow b5: rx count b6: tx count b7: gpr */ if (urb->actual_length < 8) return; buf = urb->transfer_buffer; link = !!(buf[0] & 0x40); if (netif_carrier_ok(dev->net) != link) { usbnet_link_change(dev, link, 1); netdev_dbg(dev->net, "Link Status is: %d\n", link); } } static int sr9700_link_reset(struct usbnet *dev) { struct ethtool_cmd ecmd; mii_check_media(&dev->mii, 1, 1); mii_ethtool_gset(&dev->mii, &ecmd); netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n", ecmd.speed, ecmd.duplex); return 0; } static const struct driver_info sr9700_driver_info = { .description = "CoreChip SR9700 USB Ethernet", .flags = FLAG_ETHER, .bind = sr9700_bind, .rx_fixup = sr9700_rx_fixup, .tx_fixup = sr9700_tx_fixup, .status = sr9700_status, .link_reset = sr9700_link_reset, .reset = sr9700_link_reset, }; static const struct usb_device_id products[] = { { USB_DEVICE(0x0fe6, 0x9700), /* SR9700 device */ .driver_info = (unsigned long)&sr9700_driver_info, }, {}, /* END */ }; MODULE_DEVICE_TABLE(usb, products); static 
struct usb_driver sr9700_usb_driver = { .name = "sr9700", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(sr9700_usb_driver); MODULE_AUTHOR("liujl <liujunliang_ljl@163.com>"); MODULE_DESCRIPTION("SR9700 one chip USB 1.1 USB to Ethernet device from http://www.corechip-sz.com/"); MODULE_LICENSE("GPL");
gpl-2.0
kogone/AK-Angler
drivers/usb/host/ohci-nxp.c
2069
9935
/* * driver for NXP USB Host devices * * Currently supported OHCI host devices: * - NXP LPC32xx * * Authors: Dmitry Chigirev <source@mvista.com> * Vitaly Wool <vitalywool@gmail.com> * * register initialization is based on code examples provided by Philips * Copyright (c) 2005 Koninklijke Philips Electronics N.V. * * NOTE: This driver does not have suspend/resume functionality * This driver is intended for engineering development purposes only * * 2005-2006 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/of.h> #include <linux/usb/isp1301.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/io.h> #include <mach/platform.h> #include <mach/irqs.h> #define USB_CONFIG_BASE 0x31020000 #define PWRMAN_BASE 0x40004000 #define USB_CTRL IO_ADDRESS(PWRMAN_BASE + 0x64) /* USB_CTRL bit defines */ #define USB_SLAVE_HCLK_EN (1 << 24) #define USB_DEV_NEED_CLK_EN (1 << 22) #define USB_HOST_NEED_CLK_EN (1 << 21) #define PAD_CONTROL_LAST_DRIVEN (1 << 19) #define USB_OTG_STAT_CONTROL IO_ADDRESS(USB_CONFIG_BASE + 0x110) /* USB_OTG_STAT_CONTROL bit defines */ #define TRANSPARENT_I2C_EN (1 << 7) #define HOST_EN (1 << 0) /* On LPC32xx, those are undefined */ #ifndef start_int_set_falling_edge #define start_int_set_falling_edge(irq) #define start_int_set_rising_edge(irq) #define start_int_ack(irq) #define start_int_mask(irq) #define start_int_umask(irq) #endif static struct i2c_client *isp1301_i2c_client; extern int usb_disabled(void); static struct clk *usb_pll_clk; static struct clk *usb_dev_clk; static struct clk *usb_otg_clk; static void isp1301_configure_lpc32xx(void) { /* LPC32XX only supports DAT_SE0 USB mode */ /* This sequence is important */ /* Disable transparent UART mode first */ 
i2c_smbus_write_byte_data(isp1301_i2c_client, (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), MC1_UART_EN); i2c_smbus_write_byte_data(isp1301_i2c_client, (ISP1301_I2C_MODE_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~MC1_SPEED_REG); i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_MODE_CONTROL_1, MC1_SPEED_REG); i2c_smbus_write_byte_data(isp1301_i2c_client, (ISP1301_I2C_MODE_CONTROL_2 | ISP1301_I2C_REG_CLEAR_ADDR), ~0); i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_MODE_CONTROL_2, (MC2_BI_DI | MC2_PSW_EN | MC2_SPD_SUSP_CTRL)); i2c_smbus_write_byte_data(isp1301_i2c_client, (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), ~0); i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_MODE_CONTROL_1, MC1_DAT_SE0); i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_OTG_CONTROL_1, (OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN)); i2c_smbus_write_byte_data(isp1301_i2c_client, (ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR), (OTG1_DM_PULLUP | OTG1_DP_PULLUP)); i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_INTERRUPT_LATCH | ISP1301_I2C_REG_CLEAR_ADDR, ~0); i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_INTERRUPT_FALLING | ISP1301_I2C_REG_CLEAR_ADDR, ~0); i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0); /* Enable usb_need_clk clock after transceiver is initialized */ __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL); printk(KERN_INFO "ISP1301 Vendor ID : 0x%04x\n", i2c_smbus_read_word_data(isp1301_i2c_client, 0x00)); printk(KERN_INFO "ISP1301 Product ID : 0x%04x\n", i2c_smbus_read_word_data(isp1301_i2c_client, 0x02)); printk(KERN_INFO "ISP1301 Version ID : 0x%04x\n", i2c_smbus_read_word_data(isp1301_i2c_client, 0x14)); } static void isp1301_configure(void) { isp1301_configure_lpc32xx(); } static inline void isp1301_vbus_on(void) { i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_OTG_CONTROL_1, OTG1_VBUS_DRV); } static 
inline void isp1301_vbus_off(void) { i2c_smbus_write_byte_data(isp1301_i2c_client, ISP1301_I2C_OTG_CONTROL_1 | ISP1301_I2C_REG_CLEAR_ADDR, OTG1_VBUS_DRV); } static void nxp_start_hc(void) { unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN; __raw_writel(tmp, USB_OTG_STAT_CONTROL); isp1301_vbus_on(); } static void nxp_stop_hc(void) { unsigned long tmp; isp1301_vbus_off(); tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN; __raw_writel(tmp, USB_OTG_STAT_CONTROL); } static int ohci_nxp_start(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); int ret; if ((ret = ohci_init(ohci)) < 0) return ret; if ((ret = ohci_run(ohci)) < 0) { dev_err(hcd->self.controller, "can't start\n"); ohci_stop(hcd); return ret; } return 0; } static const struct hc_driver ohci_nxp_hc_driver = { .description = hcd_name, .product_desc = "nxp OHCI", /* * generic hardware linkage */ .irq = ohci_irq, .flags = HCD_USB11 | HCD_MEMORY, .hcd_priv_size = sizeof(struct ohci_hcd), /* * basic lifecycle operations */ .start = ohci_nxp_start, .stop = ohci_stop, .shutdown = ohci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, /* * scheduling support */ .get_frame_number = ohci_get_frame, /* * root hub support */ .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, #ifdef CONFIG_PM .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif .start_port_reset = ohci_start_port_reset, }; static int usb_hcd_nxp_probe(struct platform_device *pdev) { struct usb_hcd *hcd = 0; struct ohci_hcd *ohci; const struct hc_driver *driver = &ohci_nxp_hc_driver; struct resource *res; int ret = 0, irq; struct device_node *isp1301_node; if (pdev->dev.of_node) { isp1301_node = of_parse_phandle(pdev->dev.of_node, "transceiver", 0); } else { isp1301_node = NULL; } isp1301_i2c_client = isp1301_get_client(isp1301_node); if (!isp1301_i2c_client) 
{ return -EPROBE_DEFER; } pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name); if (usb_disabled()) { dev_err(&pdev->dev, "USB is disabled\n"); ret = -ENODEV; goto fail_disable; } /* Enable AHB slave USB clock, needed for further USB clock control */ __raw_writel(USB_SLAVE_HCLK_EN | PAD_CONTROL_LAST_DRIVEN, USB_CTRL); /* Enable USB PLL */ usb_pll_clk = clk_get(&pdev->dev, "ck_pll5"); if (IS_ERR(usb_pll_clk)) { dev_err(&pdev->dev, "failed to acquire USB PLL\n"); ret = PTR_ERR(usb_pll_clk); goto fail_pll; } ret = clk_enable(usb_pll_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB PLL\n"); goto fail_pllen; } ret = clk_set_rate(usb_pll_clk, 48000); if (ret < 0) { dev_err(&pdev->dev, "failed to set USB clock rate\n"); goto fail_rate; } /* Enable USB device clock */ usb_dev_clk = clk_get(&pdev->dev, "ck_usbd"); if (IS_ERR(usb_dev_clk)) { dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); ret = PTR_ERR(usb_dev_clk); goto fail_dev; } ret = clk_enable(usb_dev_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); goto fail_deven; } /* Enable USB otg clocks */ usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg"); if (IS_ERR(usb_otg_clk)) { dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); ret = PTR_ERR(usb_otg_clk); goto fail_otg; } __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL); ret = clk_enable(usb_otg_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); goto fail_otgen; } isp1301_configure(); hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Failed to allocate HC buffer\n"); ret = -ENOMEM; goto fail_hcd; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hcd->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); goto fail_resource; } hcd->rsrc_start = res->start; hcd->rsrc_len = 
resource_size(res); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; goto fail_resource; } nxp_start_hc(); platform_set_drvdata(pdev, hcd); ohci = hcd_to_ohci(hcd); ohci_hcd_init(ohci); dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq); ret = usb_add_hcd(hcd, irq, 0); if (ret == 0) return ret; nxp_stop_hc(); fail_resource: usb_put_hcd(hcd); fail_hcd: clk_disable(usb_otg_clk); fail_otgen: clk_put(usb_otg_clk); fail_otg: clk_disable(usb_dev_clk); fail_deven: clk_put(usb_dev_clk); fail_dev: fail_rate: clk_disable(usb_pll_clk); fail_pllen: clk_put(usb_pll_clk); fail_pll: fail_disable: isp1301_i2c_client = NULL; return ret; } static int usb_hcd_nxp_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); usb_remove_hcd(hcd); nxp_stop_hc(); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); clk_disable(usb_pll_clk); clk_put(usb_pll_clk); clk_disable(usb_dev_clk); clk_put(usb_dev_clk); i2c_unregister_device(isp1301_i2c_client); isp1301_i2c_client = NULL; platform_set_drvdata(pdev, NULL); return 0; } /* work with hotplug and coldplug */ MODULE_ALIAS("platform:usb-ohci"); #ifdef CONFIG_OF static const struct of_device_id usb_hcd_nxp_match[] = { { .compatible = "nxp,ohci-nxp" }, {}, }; MODULE_DEVICE_TABLE(of, usb_hcd_nxp_match); #endif static struct platform_driver usb_hcd_nxp_driver = { .driver = { .name = "usb-ohci", .owner = THIS_MODULE, .of_match_table = of_match_ptr(usb_hcd_nxp_match), }, .probe = usb_hcd_nxp_probe, .remove = usb_hcd_nxp_remove, };
gpl-2.0
savoca/zerofltetmo
fs/ubifs/shrinker.c
2069
9624
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter */ /* * This file implements UBIFS shrinker which evicts clean znodes from the TNC * tree when Linux VM needs more RAM. * * We do not implement any LRU lists to find oldest znodes to free because it * would add additional overhead to the file system fast paths. So the shrinker * just walks the TNC tree when searching for znodes to free. * * If the root of a TNC sub-tree is clean and old enough, then the children are * also clean and old enough. So the shrinker walks the TNC in level order and * dumps entire sub-trees. * * The age of znodes is just the time-stamp when they were last looked at. * The current shrinker first tries to evict old znodes, then young ones. * * Since the shrinker is global, it has to protect against races with FS * un-mounts, which is done by the 'ubifs_infos_lock' and 'c->umount_mutex'. */ #include "ubifs.h" /* List of all UBIFS file-system instances */ LIST_HEAD(ubifs_infos); /* * We number each shrinker run and record the number on the ubifs_info structure * so that we can easily work out which ubifs_info structures have already been * done by the current run. 
*/ static unsigned int shrinker_run_no; /* Protects 'ubifs_infos' list */ DEFINE_SPINLOCK(ubifs_infos_lock); /* Global clean znode counter (for all mounted UBIFS instances) */ atomic_long_t ubifs_clean_zn_cnt; /** * shrink_tnc - shrink TNC tree. * @c: UBIFS file-system description object * @nr: number of znodes to free * @age: the age of znodes to free * @contention: if any contention, this is set to %1 * * This function traverses TNC tree and frees clean znodes. It does not free * clean znodes which younger then @age. Returns number of freed znodes. */ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention) { int total_freed = 0; struct ubifs_znode *znode, *zprev; int time = get_seconds(); ubifs_assert(mutex_is_locked(&c->umount_mutex)); ubifs_assert(mutex_is_locked(&c->tnc_mutex)); if (!c->zroot.znode || atomic_long_read(&c->clean_zn_cnt) == 0) return 0; /* * Traverse the TNC tree in levelorder manner, so that it is possible * to destroy large sub-trees. Indeed, if a znode is old, then all its * children are older or of the same age. * * Note, we are holding 'c->tnc_mutex', so we do not have to lock the * 'c->space_lock' when _reading_ 'c->clean_zn_cnt', because it is * changed only when the 'c->tnc_mutex' is held. */ zprev = NULL; znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL); while (znode && total_freed < nr && atomic_long_read(&c->clean_zn_cnt) > 0) { int freed; /* * If the znode is clean, but it is in the 'c->cnext' list, this * means that this znode has just been written to flash as a * part of commit and was marked clean. They will be removed * from the list at end commit. We cannot change the list, * because it is not protected by any mutex (design decision to * make commit really independent and parallel to main I/O). So * we just skip these znodes. * * Note, the 'clean_zn_cnt' counters are not updated until * after the commit, so the UBIFS shrinker does not report * the znodes which are in the 'c->cnext' list as freeable. 
* * Also note, if the root of a sub-tree is not in 'c->cnext', * then the whole sub-tree is not in 'c->cnext' as well, so it * is safe to dump whole sub-tree. */ if (znode->cnext) { /* * Very soon these znodes will be removed from the list * and become freeable. */ *contention = 1; } else if (!ubifs_zn_dirty(znode) && abs(time - znode->time) >= age) { if (znode->parent) znode->parent->zbranch[znode->iip].znode = NULL; else c->zroot.znode = NULL; freed = ubifs_destroy_tnc_subtree(znode); atomic_long_sub(freed, &ubifs_clean_zn_cnt); atomic_long_sub(freed, &c->clean_zn_cnt); total_freed += freed; znode = zprev; } if (unlikely(!c->zroot.znode)) break; zprev = znode; znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); cond_resched(); } return total_freed; } /** * shrink_tnc_trees - shrink UBIFS TNC trees. * @nr: number of znodes to free * @age: the age of znodes to free * @contention: if any contention, this is set to %1 * * This function walks the list of mounted UBIFS file-systems and frees clean * znodes which are older than @age, until at least @nr znodes are freed. * Returns the number of freed znodes. */ static int shrink_tnc_trees(int nr, int age, int *contention) { struct ubifs_info *c; struct list_head *p; unsigned int run_no; int freed = 0; spin_lock(&ubifs_infos_lock); do { run_no = ++shrinker_run_no; } while (run_no == 0); /* Iterate over all mounted UBIFS file-systems and try to shrink them */ p = ubifs_infos.next; while (p != &ubifs_infos) { c = list_entry(p, struct ubifs_info, infos_list); /* * We move the ones we do to the end of the list, so we stop * when we see one we have already done. */ if (c->shrinker_run_no == run_no) break; if (!mutex_trylock(&c->umount_mutex)) { /* Some un-mount is in progress, try next FS */ *contention = 1; p = p->next; continue; } /* * We're holding 'c->umount_mutex', so the file-system won't go * away. 
*/ if (!mutex_trylock(&c->tnc_mutex)) { mutex_unlock(&c->umount_mutex); *contention = 1; p = p->next; continue; } spin_unlock(&ubifs_infos_lock); /* * OK, now we have TNC locked, the file-system cannot go away - * it is safe to reap the cache. */ c->shrinker_run_no = run_no; freed += shrink_tnc(c, nr, age, contention); mutex_unlock(&c->tnc_mutex); spin_lock(&ubifs_infos_lock); /* Get the next list element before we move this one */ p = p->next; /* * Move this one to the end of the list to provide some * fairness. */ list_move_tail(&c->infos_list, &ubifs_infos); mutex_unlock(&c->umount_mutex); if (freed >= nr) break; } spin_unlock(&ubifs_infos_lock); return freed; } /** * kick_a_thread - kick a background thread to start commit. * * This function kicks a background thread to start background commit. Returns * %-1 if a thread was kicked or there is another reason to assume the memory * will soon be freed or become freeable. If there are no dirty znodes, returns * %0. */ static int kick_a_thread(void) { int i; struct ubifs_info *c; /* * Iterate over all mounted UBIFS file-systems and find out if there is * already an ongoing commit operation there. If no, then iterate for * the second time and initiate background commit. */ spin_lock(&ubifs_infos_lock); for (i = 0; i < 2; i++) { list_for_each_entry(c, &ubifs_infos, infos_list) { long dirty_zn_cnt; if (!mutex_trylock(&c->umount_mutex)) { /* * Some un-mount is in progress, it will * certainly free memory, so just return. 
*/ spin_unlock(&ubifs_infos_lock); return -1; } dirty_zn_cnt = atomic_long_read(&c->dirty_zn_cnt); if (!dirty_zn_cnt || c->cmt_state == COMMIT_BROKEN || c->ro_mount || c->ro_error) { mutex_unlock(&c->umount_mutex); continue; } if (c->cmt_state != COMMIT_RESTING) { spin_unlock(&ubifs_infos_lock); mutex_unlock(&c->umount_mutex); return -1; } if (i == 1) { list_move_tail(&c->infos_list, &ubifs_infos); spin_unlock(&ubifs_infos_lock); ubifs_request_bg_commit(c); mutex_unlock(&c->umount_mutex); return -1; } mutex_unlock(&c->umount_mutex); } } spin_unlock(&ubifs_infos_lock); return 0; } int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc) { int nr = sc->nr_to_scan; int freed, contention = 0; long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); if (nr == 0) /* * Due to the way UBIFS updates the clean znode counter it may * temporarily be negative. */ return clean_zn_cnt >= 0 ? clean_zn_cnt : 1; if (!clean_zn_cnt) { /* * No clean znodes, nothing to reap. All we can do in this case * is to kick background threads to start commit, which will * probably make clean znodes which, in turn, will be freeable. * And we return -1 which means will make VM call us again * later. */ dbg_tnc("no clean znodes, kick a thread"); return kick_a_thread(); } freed = shrink_tnc_trees(nr, OLD_ZNODE_AGE, &contention); if (freed >= nr) goto out; dbg_tnc("not enough old znodes, try to free young ones"); freed += shrink_tnc_trees(nr - freed, YOUNG_ZNODE_AGE, &contention); if (freed >= nr) goto out; dbg_tnc("not enough young znodes, free all"); freed += shrink_tnc_trees(nr - freed, 0, &contention); if (!freed && contention) { dbg_tnc("freed nothing, but contention"); return -1; } out: dbg_tnc("%d znodes were freed, requested %d", freed, nr); return freed; }
gpl-2.0
glepag1/sultan-kernel-bruce-linaro
drivers/watchdog/cpwd.c
3093
16676
/* cpwd.c - driver implementation for hardware watchdog * timers found on Sun Microsystems CP1400 and CP1500 boards. * * This device supports both the generic Linux watchdog * interface and Solaris-compatible ioctls as best it is * able. * * NOTE: CP1400 systems appear to have a defective intr_mask * register on the PLD, preventing the disabling of * timer interrupts. We use a timer to periodically * reset 'stopped' watchdogs on affected platforms. * * Copyright (c) 2000 Eric Brower (ebrower@usa.net) * Copyright (C) 2008 David S. Miller <davem@davemloft.net> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/uaccess.h> #include <asm/irq.h> #include <asm/watchdog.h> #define DRIVER_NAME "cpwd" #define PFX DRIVER_NAME ": " #define WD_OBPNAME "watchdog" #define WD_BADMODEL "SUNW,501-5336" #define WD_BTIMEOUT (jiffies + (HZ * 1000)) #define WD_BLIMIT 0xFFFF #define WD0_MINOR 212 #define WD1_MINOR 213 #define WD2_MINOR 214 /* Internal driver definitions. 
*/ #define WD0_ID 0 #define WD1_ID 1 #define WD2_ID 2 #define WD_NUMDEVS 3 #define WD_INTR_OFF 0 #define WD_INTR_ON 1 #define WD_STAT_INIT 0x01 /* Watchdog timer is initialized */ #define WD_STAT_BSTOP 0x02 /* Watchdog timer is brokenstopped */ #define WD_STAT_SVCD 0x04 /* Watchdog interrupt occurred */ /* Register value definitions */ #define WD0_INTR_MASK 0x01 /* Watchdog device interrupt masks */ #define WD1_INTR_MASK 0x02 #define WD2_INTR_MASK 0x04 #define WD_S_RUNNING 0x01 /* Watchdog device status running */ #define WD_S_EXPIRED 0x02 /* Watchdog device status expired */ struct cpwd { void __iomem *regs; spinlock_t lock; unsigned int irq; unsigned long timeout; bool enabled; bool reboot; bool broken; bool initialized; struct { struct miscdevice misc; void __iomem *regs; u8 intr_mask; u8 runstatus; u16 timeout; } devs[WD_NUMDEVS]; }; static DEFINE_MUTEX(cpwd_mutex); static struct cpwd *cpwd_device; /* Sun uses Altera PLD EPF8820ATC144-4 * providing three hardware watchdogs: * * 1) RIC - sends an interrupt when triggered * 2) XIR - asserts XIR_B_RESET when triggered, resets CPU * 3) POR - asserts POR_B_RESET when triggered, resets CPU, backplane, board * *** Timer register block definition (struct wd_timer_regblk) * * dcntr and limit registers (halfword access): * ------------------- * | 15 | ...| 1 | 0 | * ------------------- * |- counter val -| * ------------------- * dcntr - Current 16-bit downcounter value. * When downcounter reaches '0' watchdog expires. * Reading this register resets downcounter with * 'limit' value. * limit - 16-bit countdown value in 1/10th second increments. * Writing this register begins countdown with input value. * Reading from this register does not affect counter. * NOTES: After watchdog reset, dcntr and limit contain '1' * * status register (byte access): * --------------------------- * | 7 | ... 
| 2 | 1 | 0 | * --------------+------------ * |- UNUSED -| EXP | RUN | * --------------------------- * status- Bit 0 - Watchdog is running * Bit 1 - Watchdog has expired * *** PLD register block definition (struct wd_pld_regblk) * * intr_mask register (byte access): * --------------------------------- * | 7 | ... | 3 | 2 | 1 | 0 | * +-------------+------------------ * |- UNUSED -| WD3 | WD2 | WD1 | * --------------------------------- * WD3 - 1 == Interrupt disabled for watchdog 3 * WD2 - 1 == Interrupt disabled for watchdog 2 * WD1 - 1 == Interrupt disabled for watchdog 1 * * pld_status register (byte access): * UNKNOWN, MAGICAL MYSTERY REGISTER * */ #define WD_TIMER_REGSZ 16 #define WD0_OFF 0 #define WD1_OFF (WD_TIMER_REGSZ * 1) #define WD2_OFF (WD_TIMER_REGSZ * 2) #define PLD_OFF (WD_TIMER_REGSZ * 3) #define WD_DCNTR 0x00 #define WD_LIMIT 0x04 #define WD_STATUS 0x08 #define PLD_IMASK (PLD_OFF + 0x00) #define PLD_STATUS (PLD_OFF + 0x04) static struct timer_list cpwd_timer; static int wd0_timeout; static int wd1_timeout; static int wd2_timeout; module_param(wd0_timeout, int, 0); MODULE_PARM_DESC(wd0_timeout, "Default watchdog0 timeout in 1/10secs"); module_param(wd1_timeout, int, 0); MODULE_PARM_DESC(wd1_timeout, "Default watchdog1 timeout in 1/10secs"); module_param(wd2_timeout, int, 0); MODULE_PARM_DESC(wd2_timeout, "Default watchdog2 timeout in 1/10secs"); MODULE_AUTHOR("Eric Brower <ebrower@usa.net>"); MODULE_DESCRIPTION("Hardware watchdog driver for Sun Microsystems CP1400/1500"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("watchdog"); static void cpwd_writew(u16 val, void __iomem *addr) { writew(cpu_to_le16(val), addr); } static u16 cpwd_readw(void __iomem *addr) { u16 val = readw(addr); return le16_to_cpu(val); } static void cpwd_writeb(u8 val, void __iomem *addr) { writeb(val, addr); } static u8 cpwd_readb(void __iomem *addr) { return readb(addr); } /* Enable or disable watchdog interrupts * Because of the CP1400 defect this should only be * called 
during initialzation or by wd_[start|stop]timer() * * index - sub-device index, or -1 for 'all' * enable - non-zero to enable interrupts, zero to disable */ static void cpwd_toggleintr(struct cpwd *p, int index, int enable) { unsigned char curregs = cpwd_readb(p->regs + PLD_IMASK); unsigned char setregs = (index == -1) ? (WD0_INTR_MASK | WD1_INTR_MASK | WD2_INTR_MASK) : (p->devs[index].intr_mask); if (enable == WD_INTR_ON) curregs &= ~setregs; else curregs |= setregs; cpwd_writeb(curregs, p->regs + PLD_IMASK); } /* Restarts timer with maximum limit value and * does not unset 'brokenstop' value. */ static void cpwd_resetbrokentimer(struct cpwd *p, int index) { cpwd_toggleintr(p, index, WD_INTR_ON); cpwd_writew(WD_BLIMIT, p->devs[index].regs + WD_LIMIT); } /* Timer method called to reset stopped watchdogs-- * because of the PLD bug on CP1400, we cannot mask * interrupts within the PLD so me must continually * reset the timers ad infinitum. */ static void cpwd_brokentimer(unsigned long data) { struct cpwd *p = (struct cpwd *) data; int id, tripped = 0; /* kill a running timer instance, in case we * were called directly instead of by kernel timer */ if (timer_pending(&cpwd_timer)) del_timer(&cpwd_timer); for (id = 0; id < WD_NUMDEVS; id++) { if (p->devs[id].runstatus & WD_STAT_BSTOP) { ++tripped; cpwd_resetbrokentimer(p, id); } } if (tripped) { /* there is at least one timer brokenstopped-- reschedule */ cpwd_timer.expires = WD_BTIMEOUT; add_timer(&cpwd_timer); } } /* Reset countdown timer with 'limit' value and continue countdown. * This will not start a stopped timer. */ static void cpwd_pingtimer(struct cpwd *p, int index) { if (cpwd_readb(p->devs[index].regs + WD_STATUS) & WD_S_RUNNING) cpwd_readw(p->devs[index].regs + WD_DCNTR); } /* Stop a running watchdog timer-- the timer actually keeps * running, but the interrupt is masked so that no action is * taken upon expiration. 
*/ static void cpwd_stoptimer(struct cpwd *p, int index) { if (cpwd_readb(p->devs[index].regs + WD_STATUS) & WD_S_RUNNING) { cpwd_toggleintr(p, index, WD_INTR_OFF); if (p->broken) { p->devs[index].runstatus |= WD_STAT_BSTOP; cpwd_brokentimer((unsigned long) p); } } } /* Start a watchdog timer with the specified limit value * If the watchdog is running, it will be restarted with * the provided limit value. * * This function will enable interrupts on the specified * watchdog. */ static void cpwd_starttimer(struct cpwd *p, int index) { if (p->broken) p->devs[index].runstatus &= ~WD_STAT_BSTOP; p->devs[index].runstatus &= ~WD_STAT_SVCD; cpwd_writew(p->devs[index].timeout, p->devs[index].regs + WD_LIMIT); cpwd_toggleintr(p, index, WD_INTR_ON); } static int cpwd_getstatus(struct cpwd *p, int index) { unsigned char stat = cpwd_readb(p->devs[index].regs + WD_STATUS); unsigned char intr = cpwd_readb(p->devs[index].regs + PLD_IMASK); unsigned char ret = WD_STOPPED; /* determine STOPPED */ if (!stat) return ret; /* determine EXPIRED vs FREERUN vs RUNNING */ else if (WD_S_EXPIRED & stat) { ret = WD_EXPIRED; } else if (WD_S_RUNNING & stat) { if (intr & p->devs[index].intr_mask) { ret = WD_FREERUN; } else { /* Fudge WD_EXPIRED status for defective CP1400-- * IF timer is running * AND brokenstop is set * AND an interrupt has been serviced * we are WD_EXPIRED. * * IF timer is running * AND brokenstop is set * AND no interrupt has been serviced * we are WD_FREERUN. */ if (p->broken && (p->devs[index].runstatus & WD_STAT_BSTOP)) { if (p->devs[index].runstatus & WD_STAT_SVCD) { ret = WD_EXPIRED; } else { /* we could as well pretend * we are expired */ ret = WD_FREERUN; } } else { ret = WD_RUNNING; } } } /* determine SERVICED */ if (p->devs[index].runstatus & WD_STAT_SVCD) ret |= WD_SERVICED; return ret; } static irqreturn_t cpwd_interrupt(int irq, void *dev_id) { struct cpwd *p = dev_id; /* Only WD0 will interrupt-- others are NMI and we won't * see them here.... 
*/ spin_lock_irq(&p->lock); cpwd_stoptimer(p, WD0_ID); p->devs[WD0_ID].runstatus |= WD_STAT_SVCD; spin_unlock_irq(&p->lock); return IRQ_HANDLED; } static int cpwd_open(struct inode *inode, struct file *f) { struct cpwd *p = cpwd_device; mutex_lock(&cpwd_mutex); switch (iminor(inode)) { case WD0_MINOR: case WD1_MINOR: case WD2_MINOR: break; default: mutex_unlock(&cpwd_mutex); return -ENODEV; } /* Register IRQ on first open of device */ if (!p->initialized) { if (request_irq(p->irq, &cpwd_interrupt, IRQF_SHARED, DRIVER_NAME, p)) { printk(KERN_ERR PFX "Cannot register IRQ %d\n", p->irq); mutex_unlock(&cpwd_mutex); return -EBUSY; } p->initialized = true; } mutex_unlock(&cpwd_mutex); return nonseekable_open(inode, f); } static int cpwd_release(struct inode *inode, struct file *file) { return 0; } static long cpwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { static const struct watchdog_info info = { .options = WDIOF_SETTIMEOUT, .firmware_version = 1, .identity = DRIVER_NAME, }; void __user *argp = (void __user *)arg; struct inode *inode = file->f_path.dentry->d_inode; int index = iminor(inode) - WD0_MINOR; struct cpwd *p = cpwd_device; int setopt = 0; switch (cmd) { /* Generic Linux IOCTLs */ case WDIOC_GETSUPPORT: if (copy_to_user(argp, &info, sizeof(struct watchdog_info))) return -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: if (put_user(0, (int __user *)argp)) return -EFAULT; break; case WDIOC_KEEPALIVE: cpwd_pingtimer(p, index); break; case WDIOC_SETOPTIONS: if (copy_from_user(&setopt, argp, sizeof(unsigned int))) return -EFAULT; if (setopt & WDIOS_DISABLECARD) { if (p->enabled) return -EINVAL; cpwd_stoptimer(p, index); } else if (setopt & WDIOS_ENABLECARD) { cpwd_starttimer(p, index); } else { return -EINVAL; } break; /* Solaris-compatible IOCTLs */ case WIOCGSTAT: setopt = cpwd_getstatus(p, index); if (copy_to_user(argp, &setopt, sizeof(unsigned int))) return -EFAULT; break; case WIOCSTART: cpwd_starttimer(p, index); break; 
case WIOCSTOP: if (p->enabled) return -EINVAL; cpwd_stoptimer(p, index); break; default: return -EINVAL; } return 0; } static long cpwd_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rval = -ENOIOCTLCMD; switch (cmd) { /* solaris ioctls are specific to this driver */ case WIOCSTART: case WIOCSTOP: case WIOCGSTAT: mutex_lock(&cpwd_mutex); rval = cpwd_ioctl(file, cmd, arg); mutex_unlock(&cpwd_mutex); break; /* everything else is handled by the generic compat layer */ default: break; } return rval; } static ssize_t cpwd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct inode *inode = file->f_path.dentry->d_inode; struct cpwd *p = cpwd_device; int index = iminor(inode); if (count) { cpwd_pingtimer(p, index); return 1; } return 0; } static ssize_t cpwd_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { return -EINVAL; } static const struct file_operations cpwd_fops = { .owner = THIS_MODULE, .unlocked_ioctl = cpwd_ioctl, .compat_ioctl = cpwd_compat_ioctl, .open = cpwd_open, .write = cpwd_write, .read = cpwd_read, .release = cpwd_release, .llseek = no_llseek, }; static int __devinit cpwd_probe(struct platform_device *op) { struct device_node *options; const char *str_prop; const void *prop_val; int i, err = -EINVAL; struct cpwd *p; if (cpwd_device) return -EINVAL; p = kzalloc(sizeof(*p), GFP_KERNEL); err = -ENOMEM; if (!p) { printk(KERN_ERR PFX "Unable to allocate struct cpwd.\n"); goto out; } p->irq = op->archdata.irqs[0]; spin_lock_init(&p->lock); p->regs = of_ioremap(&op->resource[0], 0, 4 * WD_TIMER_REGSZ, DRIVER_NAME); if (!p->regs) { printk(KERN_ERR PFX "Unable to map registers.\n"); goto out_free; } options = of_find_node_by_path("/options"); err = -ENODEV; if (!options) { printk(KERN_ERR PFX "Unable to find /options node.\n"); goto out_iounmap; } prop_val = of_get_property(options, "watchdog-enable?", NULL); p->enabled = (prop_val ? 
true : false); prop_val = of_get_property(options, "watchdog-reboot?", NULL); p->reboot = (prop_val ? true : false); str_prop = of_get_property(options, "watchdog-timeout", NULL); if (str_prop) p->timeout = simple_strtoul(str_prop, NULL, 10); /* CP1400s seem to have broken PLD implementations-- the * interrupt_mask register cannot be written, so no timer * interrupts can be masked within the PLD. */ str_prop = of_get_property(op->dev.of_node, "model", NULL); p->broken = (str_prop && !strcmp(str_prop, WD_BADMODEL)); if (!p->enabled) cpwd_toggleintr(p, -1, WD_INTR_OFF); for (i = 0; i < WD_NUMDEVS; i++) { static const char *cpwd_names[] = { "RIC", "XIR", "POR" }; static int *parms[] = { &wd0_timeout, &wd1_timeout, &wd2_timeout }; struct miscdevice *mp = &p->devs[i].misc; mp->minor = WD0_MINOR + i; mp->name = cpwd_names[i]; mp->fops = &cpwd_fops; p->devs[i].regs = p->regs + (i * WD_TIMER_REGSZ); p->devs[i].intr_mask = (WD0_INTR_MASK << i); p->devs[i].runstatus &= ~WD_STAT_BSTOP; p->devs[i].runstatus |= WD_STAT_INIT; p->devs[i].timeout = p->timeout; if (*parms[i]) p->devs[i].timeout = *parms[i]; err = misc_register(&p->devs[i].misc); if (err) { printk(KERN_ERR "Could not register misc device for " "dev %d\n", i); goto out_unregister; } } if (p->broken) { init_timer(&cpwd_timer); cpwd_timer.function = cpwd_brokentimer; cpwd_timer.data = (unsigned long) p; cpwd_timer.expires = WD_BTIMEOUT; printk(KERN_INFO PFX "PLD defect workaround enabled for " "model " WD_BADMODEL ".\n"); } dev_set_drvdata(&op->dev, p); cpwd_device = p; err = 0; out: return err; out_unregister: for (i--; i >= 0; i--) misc_deregister(&p->devs[i].misc); out_iounmap: of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ); out_free: kfree(p); goto out; } static int __devexit cpwd_remove(struct platform_device *op) { struct cpwd *p = dev_get_drvdata(&op->dev); int i; for (i = 0; i < WD_NUMDEVS; i++) { misc_deregister(&p->devs[i].misc); if (!p->enabled) { cpwd_stoptimer(p, i); if (p->devs[i].runstatus & 
WD_STAT_BSTOP) cpwd_resetbrokentimer(p, i); } } if (p->broken) del_timer_sync(&cpwd_timer); if (p->initialized) free_irq(p->irq, p); of_iounmap(&op->resource[0], p->regs, 4 * WD_TIMER_REGSZ); kfree(p); cpwd_device = NULL; return 0; } static const struct of_device_id cpwd_match[] = { { .name = "watchdog", }, {}, }; MODULE_DEVICE_TABLE(of, cpwd_match); static struct platform_driver cpwd_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = cpwd_match, }, .probe = cpwd_probe, .remove = __devexit_p(cpwd_remove), }; static int __init cpwd_init(void) { return platform_driver_register(&cpwd_driver); } static void __exit cpwd_exit(void) { platform_driver_unregister(&cpwd_driver); } module_init(cpwd_init); module_exit(cpwd_exit);
gpl-2.0
HB72K/android_kernel_lgk10_mt6582
arch/frv/mm/cache-page.c
4629
1948
/* cache-page.c: whole-page cache wrangling functions for MMU linux * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/module.h> #include <asm/pgalloc.h> /*****************************************************************************/ /* * DCF takes a virtual address and the page may not currently have one * - temporarily hijack a kmap_atomic() slot and attach the page to it */ void flush_dcache_page(struct page *page) { unsigned long dampr2; void *vaddr; dampr2 = __get_DAMPR(2); vaddr = kmap_atomic_primary(page); frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE); kunmap_atomic_primary(vaddr); if (dampr2) { __set_DAMPR(2, dampr2); __set_IAMPR(2, dampr2); } } /* end flush_dcache_page() */ EXPORT_SYMBOL(flush_dcache_page); /*****************************************************************************/ /* * ICI takes a virtual address and the page may not currently have one * - so we temporarily attach the page to a bit of virtual space so that is can be flushed */ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, unsigned long start, unsigned long len) { unsigned long dampr2; void *vaddr; dampr2 = __get_DAMPR(2); vaddr = kmap_atomic_primary(page); start = (start & ~PAGE_MASK) | (unsigned long) vaddr; frv_cache_wback_inv(start, start + len); kunmap_atomic_primary(vaddr); if (dampr2) { __set_DAMPR(2, dampr2); __set_IAMPR(2, dampr2); } } /* end flush_icache_user_range() */ EXPORT_SYMBOL(flush_icache_user_range);
gpl-2.0
xplodwild/packaged-linux-linaro-3.2-ci
arch/powerpc/kernel/power5+-pmu.c
5397
18899
/* * Performance counter support for POWER5+/++ (not POWER5) processors. * * Copyright 2009 Paul Mackerras, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/perf_event.h> #include <linux/string.h> #include <asm/reg.h> #include <asm/cputable.h> /* * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3) */ #define PM_PMC_SH 20 /* PMC number (1-based) for direct events */ #define PM_PMC_MSK 0xf #define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH) #define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */ #define PM_UNIT_MSK 0xf #define PM_BYTE_SH 12 /* Byte number of event bus to use */ #define PM_BYTE_MSK 7 #define PM_GRS_SH 8 /* Storage subsystem mux select */ #define PM_GRS_MSK 7 #define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */ #define PM_PMCSEL_MSK 0x7f /* Values in PM_UNIT field */ #define PM_FPU 0 #define PM_ISU0 1 #define PM_IFU 2 #define PM_ISU1 3 #define PM_IDU 4 #define PM_ISU0_ALT 6 #define PM_GRS 7 #define PM_LSU0 8 #define PM_LSU1 0xc #define PM_LASTUNIT 0xc /* * Bits in MMCR1 for POWER5+ */ #define MMCR1_TTM0SEL_SH 62 #define MMCR1_TTM1SEL_SH 60 #define MMCR1_TTM2SEL_SH 58 #define MMCR1_TTM3SEL_SH 56 #define MMCR1_TTMSEL_MSK 3 #define MMCR1_TD_CP_DBG0SEL_SH 54 #define MMCR1_TD_CP_DBG1SEL_SH 52 #define MMCR1_TD_CP_DBG2SEL_SH 50 #define MMCR1_TD_CP_DBG3SEL_SH 48 #define MMCR1_GRS_L2SEL_SH 46 #define MMCR1_GRS_L2SEL_MSK 3 #define MMCR1_GRS_L3SEL_SH 44 #define MMCR1_GRS_L3SEL_MSK 3 #define MMCR1_GRS_MCSEL_SH 41 #define MMCR1_GRS_MCSEL_MSK 7 #define MMCR1_GRS_FABSEL_SH 39 #define MMCR1_GRS_FABSEL_MSK 3 #define MMCR1_PMC1_ADDER_SEL_SH 35 #define MMCR1_PMC2_ADDER_SEL_SH 34 #define MMCR1_PMC3_ADDER_SEL_SH 33 #define MMCR1_PMC4_ADDER_SEL_SH 32 #define MMCR1_PMC1SEL_SH 25 
#define MMCR1_PMC2SEL_SH 17 #define MMCR1_PMC3SEL_SH 9 #define MMCR1_PMC4SEL_SH 1 #define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8) #define MMCR1_PMCSEL_MSK 0x7f /* * Layout of constraint bits: * 6666555555555544444444443333333333222222222211111111110000000000 * 3210987654321098765432109876543210987654321098765432109876543210 * [ ><><>< ><> <><>[ > < >< >< >< ><><><><><><> * NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P6P5P4P3P2P1 * * NC - number of counters * 51: NC error 0x0008_0000_0000_0000 * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000 * * G0..G3 - GRS mux constraints * 46-47: GRS_L2SEL value * 44-45: GRS_L3SEL value * 41-44: GRS_MCSEL value * 39-40: GRS_FABSEL value * Note that these match up with their bit positions in MMCR1 * * T0 - TTM0 constraint * 36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000 * * T1 - TTM1 constraint * 34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000 * * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS * 33: UC3 error 0x02_0000_0000 * 32: FPU|IFU|ISU1 events needed 0x01_0000_0000 * 31: ISU0 events needed 0x01_8000_0000 * 30: IDU|GRS events needed 0x00_4000_0000 * * B0 * 24-27: Byte 0 event source 0x0f00_0000 * Encoding as for the event code * * B1, B2, B3 * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources * * P6 * 11: P6 error 0x800 * 10-11: Count of events needing PMC6 * * P1..P5 * 0-9: Count of events needing PMC1..PMC5 */ static const int grsel_shift[8] = { MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH }; /* Masks and values for using events from the various units */ static unsigned long unit_cons[PM_LASTUNIT+1][2] = { [PM_FPU] = { 0x3200000000ul, 0x0100000000ul }, [PM_ISU0] = { 0x0200000000ul, 0x0080000000ul }, [PM_ISU1] = { 0x3200000000ul, 0x3100000000ul }, [PM_IFU] = { 0x3200000000ul, 0x2100000000ul }, [PM_IDU] = { 0x0e00000000ul, 0x0040000000ul }, [PM_GRS] = { 
0x0e00000000ul, 0x0c40000000ul }, }; static int power5p_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) { int pmc, byte, unit, sh; int bit, fmask; unsigned long mask = 0, value = 0; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 6) return -1; sh = (pmc - 1) * 2; mask |= 2 << sh; value |= 1 << sh; if (pmc >= 5 && !(event == 0x500009 || event == 0x600005)) return -1; } if (event & PM_BUSEVENT_MSK) { unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; if (unit > PM_LASTUNIT) return -1; if (unit == PM_ISU0_ALT) unit = PM_ISU0; mask |= unit_cons[unit][0]; value |= unit_cons[unit][1]; byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; if (byte >= 4) { if (unit != PM_LSU1) return -1; /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */ ++unit; byte &= 3; } if (unit == PM_GRS) { bit = event & 7; fmask = (bit == 6)? 7: 3; sh = grsel_shift[bit]; mask |= (unsigned long)fmask << sh; value |= (unsigned long)((event >> PM_GRS_SH) & fmask) << sh; } /* Set byte lane select field */ mask |= 0xfUL << (24 - 4 * byte); value |= (unsigned long)unit << (24 - 4 * byte); } if (pmc < 5) { /* need a counter from PMC1-4 set */ mask |= 0x8000000000000ul; value |= 0x1000000000000ul; } *maskp = mask; *valp = value; return 0; } static int power5p_limited_pmc_event(u64 event) { int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; return pmc == 5 || pmc == 6; } #define MAX_ALT 3 /* at most 3 alternatives for any event */ static const unsigned int event_alternatives[][MAX_ALT] = { { 0x100c0, 0x40001f }, /* PM_GCT_FULL_CYC */ { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */ { 0x230e2, 0x323087 }, /* PM_BR_PRED_CR */ { 0x230e3, 0x223087, 0x3230a0 }, /* PM_BR_PRED_TA */ { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */ { 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */ { 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */ { 0x100005, 0x600005 }, /* PM_RUN_CYC */ { 0x100009, 0x200009 }, /* PM_INST_CMPL */ { 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */ { 0x300009, 0x400009 }, /* PM_INST_DISP */ }; /* 
* Scan the alternatives table for a match and return the * index into the alternatives table if found, else -1. */ static int find_alternative(unsigned int event) { int i, j; for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) { if (event < event_alternatives[i][0]) break; for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j) if (event == event_alternatives[i][j]) return i; } return -1; } static const unsigned char bytedecode_alternatives[4][4] = { /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 }, /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e }, /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 }, /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e } }; /* * Some direct events for decodes of event bus byte 3 have alternative * PMCSEL values on other counters. This returns the alternative * event code for those that do, or -1 otherwise. This also handles * alternative PCMSEL values for add events. */ static s64 find_alternative_bdecode(u64 event) { int pmc, altpmc, pp, j; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; if (pmc == 0 || pmc > 4) return -1; altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */ pp = event & PM_PMCSEL_MSK; for (j = 0; j < 4; ++j) { if (bytedecode_alternatives[pmc - 1][j] == pp) { return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) | (altpmc << PM_PMC_SH) | bytedecode_alternatives[altpmc - 1][j]; } } /* new decode alternatives for power5+ */ if (pmc == 1 && (pp == 0x0d || pp == 0x0e)) return event + (2 << PM_PMC_SH) + (0x2e - 0x0d); if (pmc == 3 && (pp == 0x2e || pp == 0x2f)) return event - (2 << PM_PMC_SH) - (0x2e - 0x0d); /* alternative add event encodings */ if (pp == 0x10 || pp == 0x28) return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) | (altpmc << PM_PMC_SH); return -1; } static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[]) { int i, j, nalt = 1; int nlim; s64 ae; alt[0] = event; nalt = 1; nlim = power5p_limited_pmc_event(event); i = find_alternative(event); if (i >= 0) { for (j = 0; j < MAX_ALT; ++j) { ae = event_alternatives[i][j]; if (ae && ae != event) alt[nalt++] = ae; 
nlim += power5p_limited_pmc_event(ae); } } else { ae = find_alternative_bdecode(event); if (ae > 0) alt[nalt++] = ae; } if (flags & PPMU_ONLY_COUNT_RUN) { /* * We're only counting in RUN state, * so PM_CYC is equivalent to PM_RUN_CYC * and PM_INST_CMPL === PM_RUN_INST_CMPL. * This doesn't include alternatives that don't provide * any extra flexibility in assigning PMCs (e.g. * 0x100005 for PM_RUN_CYC vs. 0xf for PM_CYC). * Note that even with these additional alternatives * we never end up with more than 3 alternatives for any event. */ j = nalt; for (i = 0; i < nalt; ++i) { switch (alt[i]) { case 0xf: /* PM_CYC */ alt[j++] = 0x600005; /* PM_RUN_CYC */ ++nlim; break; case 0x600005: /* PM_RUN_CYC */ alt[j++] = 0xf; break; case 0x100009: /* PM_INST_CMPL */ alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */ ++nlim; break; case 0x500009: /* PM_RUN_INST_CMPL */ alt[j++] = 0x100009; /* PM_INST_CMPL */ alt[j++] = 0x200009; break; } } nalt = j; } if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) { /* remove the limited PMC events */ j = 0; for (i = 0; i < nalt; ++i) { if (!power5p_limited_pmc_event(alt[i])) { alt[j] = alt[i]; ++j; } } nalt = j; } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) { /* remove all but the limited PMC events */ j = 0; for (i = 0; i < nalt; ++i) { if (power5p_limited_pmc_event(alt[i])) { alt[j] = alt[i]; ++j; } } nalt = j; } return nalt; } /* * Map of which direct events on which PMCs are marked instruction events. * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event. * Bit 0 is set if it is marked for all PMCs. * The 0x80 bit indicates a byte decode PMCSEL value. 
*/ static unsigned char direct_event_is_marked[0x28] = { 0, /* 00 */ 0x1f, /* 01 PM_IOPS_CMPL */ 0x2, /* 02 PM_MRK_GRP_DISP */ 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */ 0, /* 04 */ 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */ 0x80, /* 06 */ 0x80, /* 07 */ 0, 0, 0,/* 08 - 0a */ 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */ 0, /* 0c */ 0x80, /* 0d */ 0x80, /* 0e */ 0, /* 0f */ 0, /* 10 */ 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */ 0, /* 12 */ 0x10, /* 13 PM_MRK_GRP_CMPL */ 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */ 0x2, /* 15 PM_MRK_GRP_ISSUED */ 0x80, /* 16 */ 0x80, /* 17 */ 0, 0, 0, 0, 0, 0x80, /* 1d */ 0x80, /* 1e */ 0, /* 1f */ 0x80, /* 20 */ 0x80, /* 21 */ 0x80, /* 22 */ 0x80, /* 23 */ 0x80, /* 24 */ 0x80, /* 25 */ 0x80, /* 26 */ 0x80, /* 27 */ }; /* * Returns 1 if event counts things relating to marked instructions * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. */ static int power5p_marked_instr_event(u64 event) { int pmc, psel; int bit, byte, unit; u32 mask; pmc = (event >> PM_PMC_SH) & PM_PMC_MSK; psel = event & PM_PMCSEL_MSK; if (pmc >= 5) return 0; bit = -1; if (psel < sizeof(direct_event_is_marked)) { if (direct_event_is_marked[psel] & (1 << pmc)) return 1; if (direct_event_is_marked[psel] & 0x80) bit = 4; else if (psel == 0x08) bit = pmc - 1; else if (psel == 0x10) bit = 4 - pmc; else if (psel == 0x1b && (pmc == 1 || pmc == 3)) bit = 4; } else if ((psel & 0x48) == 0x40) { bit = psel & 7; } else if (psel == 0x28) { bit = pmc - 1; } else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) { bit = 4; } if (!(event & PM_BUSEVENT_MSK) || bit == -1) return 0; byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK; unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK; if (unit == PM_LSU0) { /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */ mask = 0x5dff00; } else if (unit == PM_LSU1 && byte >= 4) { byte -= 4; /* byte 5 bits 6-7, byte 6 bits 0,4, byte 7 bits 0-4,6 */ mask = 0x5f11c000; } else return 0; return (mask 
>> (byte * 8 + bit)) & 1; } static int power5p_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[], unsigned long mmcr[]) { unsigned long mmcr1 = 0; unsigned long mmcra = 0; unsigned int pmc, unit, byte, psel; unsigned int ttm; int i, isbus, bit, grsel; unsigned int pmc_inuse = 0; unsigned char busbyte[4]; unsigned char unituse[16]; int ttmuse; if (n_ev > 6) return -1; /* First pass to count resource use */ memset(busbyte, 0, sizeof(busbyte)); memset(unituse, 0, sizeof(unituse)); for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; if (pmc) { if (pmc > 6) return -1; if (pmc_inuse & (1 << (pmc - 1))) return -1; pmc_inuse |= 1 << (pmc - 1); } if (event[i] & PM_BUSEVENT_MSK) { unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; if (unit > PM_LASTUNIT) return -1; if (unit == PM_ISU0_ALT) unit = PM_ISU0; if (byte >= 4) { if (unit != PM_LSU1) return -1; ++unit; byte &= 3; } if (busbyte[byte] && busbyte[byte] != unit) return -1; busbyte[byte] = unit; unituse[unit] = 1; } } /* * Assign resources and set multiplexer selects. * * PM_ISU0 can go either on TTM0 or TTM1, but that's the only * choice we have to deal with. */ if (unituse[PM_ISU0] & (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) { unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */ unituse[PM_ISU0] = 0; } /* Set TTM[01]SEL fields. */ ttmuse = 0; for (i = PM_FPU; i <= PM_ISU1; ++i) { if (!unituse[i]) continue; if (ttmuse++) return -1; mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH; } ttmuse = 0; for (; i <= PM_GRS; ++i) { if (!unituse[i]) continue; if (ttmuse++) return -1; mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH; } if (ttmuse > 1) return -1; /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. 
*/ for (byte = 0; byte < 4; ++byte) { unit = busbyte[byte]; if (!unit) continue; if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) { /* get ISU0 through TTM1 rather than TTM0 */ unit = PM_ISU0_ALT; } else if (unit == PM_LSU1 + 1) { /* select lower word of LSU1 for this byte */ mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte); } ttm = unit >> 2; mmcr1 |= (unsigned long)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte); } /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */ for (i = 0; i < n_ev; ++i) { pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK; unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK; byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK; psel = event[i] & PM_PMCSEL_MSK; isbus = event[i] & PM_BUSEVENT_MSK; if (!pmc) { /* Bus event or any-PMC direct event */ for (pmc = 0; pmc < 4; ++pmc) { if (!(pmc_inuse & (1 << pmc))) break; } if (pmc >= 4) return -1; pmc_inuse |= 1 << pmc; } else if (pmc <= 4) { /* Direct event */ --pmc; if (isbus && (byte & 2) && (psel == 8 || psel == 0x10 || psel == 0x28)) /* add events on higher-numbered bus */ mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc); } else { /* Instructions or run cycles on PMC5/6 */ --pmc; } if (isbus && unit == PM_GRS) { bit = psel & 7; grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK; mmcr1 |= (unsigned long)grsel << grsel_shift[bit]; } if (power5p_marked_instr_event(event[i])) mmcra |= MMCRA_SAMPLE_ENABLE; if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1)) /* select alternate byte lane */ psel |= 0x10; if (pmc <= 3) mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc); hwc[i] = pmc; } /* Return MMCRx values */ mmcr[0] = 0; if (pmc_inuse & 1) mmcr[0] = MMCR0_PMC1CE; if (pmc_inuse & 0x3e) mmcr[0] |= MMCR0_PMCjCE; mmcr[1] = mmcr1; mmcr[2] = mmcra; return 0; } static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[]) { if (pmc <= 3) mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc)); } static int power5p_generic_events[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0xf, [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009, 
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */ [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */ }; #define C(x) PERF_COUNT_HW_CACHE_##x /* * Table of generalized cache-related events. * 0 means not supported, -1 means nonsensical, other values * are event codes. */ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x1c10a8, 0x3c1088 }, [C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 }, [C(OP_PREFETCH)] = { 0xc70e7, -1 }, }, [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { 0, 0 }, }, [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0 }, [C(OP_WRITE)] = { 0, 0 }, [C(OP_PREFETCH)] = { 0xc50c3, 0 }, }, [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0xc20e4, 0x800c4 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0, 0x800c0 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { 0x230e4, 0x230e5 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ [C(OP_READ)] = { -1, -1 }, [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, }; static struct power_pmu power5p_pmu = { .name = "POWER5+/++", .n_counter = 6, .max_alternatives = MAX_ALT, .add_fields = 0x7000000000055ul, .test_adder = 0x3000040000000ul, .compute_mmcr = power5p_compute_mmcr, .get_constraint = power5p_get_constraint, .get_alternatives = power5p_get_alternatives, .disable_pmc = power5p_disable_pmc, .limited_pmc_event = power5p_limited_pmc_event, .flags = PPMU_LIMITED_PMC5_6, .n_generic = ARRAY_SIZE(power5p_generic_events), .generic_events = power5p_generic_events, .cache_events = 
&power5p_cache_events, }; static int __init init_power5p_pmu(void) { if (!cur_cpu_spec->oprofile_cpu_type || (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+") && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++"))) return -ENODEV; return register_power_pmu(&power5p_pmu); } early_initcall(init_power5p_pmu);
gpl-2.0
SlimLG2/android_kernel_motorola_msm8992
arch/powerpc/mm/pgtable.c
6933
6881
/* * This file contains common routines for dealing with free of page tables * Along with common page table handling code * * Derived from arch/powerpc/mm/tlb_64.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <engebret@us.ibm.com> * Rework for PPC64 port. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include "mmu_decl.h" static inline int is_exec_fault(void) { return current->thread.regs && TRAP(current->thread.regs) == 0x400; } /* We only try to do i/d cache coherency on stuff that looks like * reasonably "normal" PTEs. We currently require a PTE to be present * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that * on userspace PTEs */ static inline int pte_looks_normal(pte_t pte) { return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) == (_PAGE_PRESENT | _PAGE_USER); } struct page * maybe_pte_to_page(pte_t pte) { unsigned long pfn = pte_pfn(pte); struct page *page; if (unlikely(!pfn_valid(pfn))) return NULL; page = pfn_to_page(pfn); if (PageReserved(page)) return NULL; return page; } #if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 /* Server-style MMU handles coherency when hashing if HW exec permission * is supposed per page (currently 64-bit only). 
If not, then, we always * flush the cache for valid PTEs in set_pte. Embedded CPU without HW exec * support falls into the same category. */ static pte_t set_pte_filter(pte_t pte, unsigned long addr) { pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || cpu_has_feature(CPU_FTR_NOEXECUTE))) { struct page *pg = maybe_pte_to_page(pte); if (!pg) return pte; if (!test_bit(PG_arch_1, &pg->flags)) { #ifdef CONFIG_8xx /* On 8xx, cache control instructions (particularly * "dcbst" from flush_dcache_icache) fault as write * operation if there is an unpopulated TLB entry * for the address in question. To workaround that, * we invalidate the TLB here, thus avoiding dcbst * misbehaviour. */ /* 8xx doesn't care about PID, size or ind args */ _tlbil_va(addr, 0, 0, 0); #endif /* CONFIG_8xx */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); } } return pte; } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { return pte; } #else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */ /* Embedded type MMU with HW exec support. This is a bit more complicated * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. 
*/ static pte_t set_pte_filter(pte_t pte, unsigned long addr) { struct page *pg; /* No exec permission in the first place, move on */ if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte)) return pte; /* If you set _PAGE_EXEC on weird pages you're on your own */ pg = maybe_pte_to_page(pte); if (unlikely(!pg)) return pte; /* If the page clean, we move on */ if (test_bit(PG_arch_1, &pg->flags)) return pte; /* If it's an exec fault, we flush the cache and make it clean */ if (is_exec_fault()) { flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); return pte; } /* Else, we filter out _PAGE_EXEC */ return __pte(pte_val(pte) & ~_PAGE_EXEC); } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { struct page *pg; /* So here, we only care about exec faults, as we use them * to recover lost _PAGE_EXEC and perform I$/D$ coherency * if necessary. Also if _PAGE_EXEC is already set, same deal, * we just bail out */ if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault()) return pte; #ifdef CONFIG_DEBUG_VM /* So this is an exec fault, _PAGE_EXEC is not set. If it was * an error we would have bailed out earlier in do_page_fault() * but let's make sure of it */ if (WARN_ON(!(vma->vm_flags & VM_EXEC))) return pte; #endif /* CONFIG_DEBUG_VM */ /* If you set _PAGE_EXEC on weird pages you're on your own */ pg = maybe_pte_to_page(pte); if (unlikely(!pg)) goto bail; /* If the page is already clean, we move on */ if (test_bit(PG_arch_1, &pg->flags)) goto bail; /* Clean the page and set PG_arch_1 */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); bail: return __pte(pte_val(pte) | _PAGE_EXEC); } #endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */ /* * set_pte stores a linux PTE into the linux page table. 
*/ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { #ifdef CONFIG_DEBUG_VM WARN_ON(pte_present(*ptep)); #endif /* Note: mm->context.id might not yet have been assigned as * this context might not have been activated yet when this * is called. */ pte = set_pte_filter(pte, addr); /* Perform the setting of the PTE */ __set_pte_at(mm, addr, ptep, pte, 0); } /* * This is called when relaxing access to a PTE. It's also called in the page * fault path when we don't hit any of the major fault cases, ie, a minor * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have * handled those two for us, we additionally deal with missing execute * permission here on some processors */ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { int changed; entry = set_access_flags_filter(entry, vma, dirty); changed = !pte_same(*(ptep), entry); if (changed) { if (!is_vm_hugetlb_page(vma)) assert_pte_locked(vma->vm_mm, address); __ptep_set_access_flags(ptep, entry); flush_tlb_page_nohash(vma, address); } return changed; } #ifdef CONFIG_DEBUG_VM void assert_pte_locked(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; if (mm == &init_mm) return; pgd = mm->pgd + pgd_index(addr); BUG_ON(pgd_none(*pgd)); pud = pud_offset(pgd, addr); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, addr); BUG_ON(!pmd_present(*pmd)); assert_spin_locked(pte_lockptr(mm, pmd)); } #endif /* CONFIG_DEBUG_VM */
gpl-2.0
fortuna-dev/android_kernel_samsung_fortuna-common
arch/powerpc/mm/pgtable.c
6933
6881
/* * This file contains common routines for dealing with free of page tables * Along with common page table handling code * * Derived from arch/powerpc/mm/tlb_64.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <engebret@us.ibm.com> * Rework for PPC64 port. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include "mmu_decl.h" static inline int is_exec_fault(void) { return current->thread.regs && TRAP(current->thread.regs) == 0x400; } /* We only try to do i/d cache coherency on stuff that looks like * reasonably "normal" PTEs. We currently require a PTE to be present * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that * on userspace PTEs */ static inline int pte_looks_normal(pte_t pte) { return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) == (_PAGE_PRESENT | _PAGE_USER); } struct page * maybe_pte_to_page(pte_t pte) { unsigned long pfn = pte_pfn(pte); struct page *page; if (unlikely(!pfn_valid(pfn))) return NULL; page = pfn_to_page(pfn); if (PageReserved(page)) return NULL; return page; } #if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 /* Server-style MMU handles coherency when hashing if HW exec permission * is supposed per page (currently 64-bit only). 
If not, then, we always * flush the cache for valid PTEs in set_pte. Embedded CPU without HW exec * support falls into the same category. */ static pte_t set_pte_filter(pte_t pte, unsigned long addr) { pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || cpu_has_feature(CPU_FTR_NOEXECUTE))) { struct page *pg = maybe_pte_to_page(pte); if (!pg) return pte; if (!test_bit(PG_arch_1, &pg->flags)) { #ifdef CONFIG_8xx /* On 8xx, cache control instructions (particularly * "dcbst" from flush_dcache_icache) fault as write * operation if there is an unpopulated TLB entry * for the address in question. To workaround that, * we invalidate the TLB here, thus avoiding dcbst * misbehaviour. */ /* 8xx doesn't care about PID, size or ind args */ _tlbil_va(addr, 0, 0, 0); #endif /* CONFIG_8xx */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); } } return pte; } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { return pte; } #else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */ /* Embedded type MMU with HW exec support. This is a bit more complicated * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. 
*/ static pte_t set_pte_filter(pte_t pte, unsigned long addr) { struct page *pg; /* No exec permission in the first place, move on */ if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte)) return pte; /* If you set _PAGE_EXEC on weird pages you're on your own */ pg = maybe_pte_to_page(pte); if (unlikely(!pg)) return pte; /* If the page clean, we move on */ if (test_bit(PG_arch_1, &pg->flags)) return pte; /* If it's an exec fault, we flush the cache and make it clean */ if (is_exec_fault()) { flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); return pte; } /* Else, we filter out _PAGE_EXEC */ return __pte(pte_val(pte) & ~_PAGE_EXEC); } static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, int dirty) { struct page *pg; /* So here, we only care about exec faults, as we use them * to recover lost _PAGE_EXEC and perform I$/D$ coherency * if necessary. Also if _PAGE_EXEC is already set, same deal, * we just bail out */ if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault()) return pte; #ifdef CONFIG_DEBUG_VM /* So this is an exec fault, _PAGE_EXEC is not set. If it was * an error we would have bailed out earlier in do_page_fault() * but let's make sure of it */ if (WARN_ON(!(vma->vm_flags & VM_EXEC))) return pte; #endif /* CONFIG_DEBUG_VM */ /* If you set _PAGE_EXEC on weird pages you're on your own */ pg = maybe_pte_to_page(pte); if (unlikely(!pg)) goto bail; /* If the page is already clean, we move on */ if (test_bit(PG_arch_1, &pg->flags)) goto bail; /* Clean the page and set PG_arch_1 */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); bail: return __pte(pte_val(pte) | _PAGE_EXEC); } #endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */ /* * set_pte stores a linux PTE into the linux page table. 
*/ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { #ifdef CONFIG_DEBUG_VM WARN_ON(pte_present(*ptep)); #endif /* Note: mm->context.id might not yet have been assigned as * this context might not have been activated yet when this * is called. */ pte = set_pte_filter(pte, addr); /* Perform the setting of the PTE */ __set_pte_at(mm, addr, ptep, pte, 0); } /* * This is called when relaxing access to a PTE. It's also called in the page * fault path when we don't hit any of the major fault cases, ie, a minor * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have * handled those two for us, we additionally deal with missing execute * permission here on some processors */ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { int changed; entry = set_access_flags_filter(entry, vma, dirty); changed = !pte_same(*(ptep), entry); if (changed) { if (!is_vm_hugetlb_page(vma)) assert_pte_locked(vma->vm_mm, address); __ptep_set_access_flags(ptep, entry); flush_tlb_page_nohash(vma, address); } return changed; } #ifdef CONFIG_DEBUG_VM void assert_pte_locked(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; if (mm == &init_mm) return; pgd = mm->pgd + pgd_index(addr); BUG_ON(pgd_none(*pgd)); pud = pud_offset(pgd, addr); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, addr); BUG_ON(!pmd_present(*pmd)); assert_spin_locked(pte_lockptr(mm, pmd)); } #endif /* CONFIG_DEBUG_VM */
gpl-2.0
jtpoo3/kernel_asus_flo
arch/sh/kernel/cpu/sh3/clock-sh7709.c
9237
2106
/* * arch/sh/kernel/cpu/sh3/clock-sh7709.c * * SH7709 support for the clock framework * * Copyright (C) 2005 Andriy Skulysh * * Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c * Copyright (C) 2005 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/clock.h> #include <asm/freq.h> #include <asm/io.h> static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 }; static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 }; static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; static void master_clk_init(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); clk->rate *= pfc_divisors[idx]; } static struct sh_clk_ops sh7709_master_clk_ops = { .init = master_clk_init, }; static unsigned long module_clk_recalc(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); return clk->parent->rate / pfc_divisors[idx]; } static struct sh_clk_ops sh7709_module_clk_ops = { .recalc = module_clk_recalc, }; static unsigned long bus_clk_recalc(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = (frqcr & 0x0080) ? 
((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1; return clk->parent->rate * stc_multipliers[idx]; } static struct sh_clk_ops sh7709_bus_clk_ops = { .recalc = bus_clk_recalc, }; static unsigned long cpu_clk_recalc(struct clk *clk) { int frqcr = __raw_readw(FRQCR); int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2); return clk->parent->rate / ifc_divisors[idx]; } static struct sh_clk_ops sh7709_cpu_clk_ops = { .recalc = cpu_clk_recalc, }; static struct sh_clk_ops *sh7709_clk_ops[] = { &sh7709_master_clk_ops, &sh7709_module_clk_ops, &sh7709_bus_clk_ops, &sh7709_cpu_clk_ops, }; void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) { if (idx < ARRAY_SIZE(sh7709_clk_ops)) *ops = sh7709_clk_ops[idx]; }
gpl-2.0
DecimalMan/dkp-tw
drivers/tc/tc.c
12053
5224
/*
 *	TURBOchannel bus services.
 *
 *	Copyright (c) Harald Koerfgen, 1998
 *	Copyright (c) 2001, 2003, 2005, 2006  Maciej W. Rozycki
 *	Copyright (c) 2005  James Simmons
 *
 *	This file is subject to the terms and conditions of the GNU
 *	General Public License.  See the file "COPYING" in the main
 *	directory of this archive for more details.
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tc.h>
#include <linux/types.h>

#include <asm/io.h>

static struct tc_bus tc_bus = {
	.name = "TURBOchannel",
};

/*
 * Probing for TURBOchannel modules.
 *
 * For each slot: read the ROM signature pattern (first at the old-style
 * card offset, then at the new-style one).  When a board is recognised,
 * allocate a tc_dev, copy the ID strings out of the ROM, assign it a
 * memory resource (regular or extended slot space depending on the
 * decoded device size) and register it with the driver core.
 */
static void __init tc_bus_add_devices(struct tc_bus *tbus)
{
	resource_size_t slotsize = tbus->info.slot_size << 20;
	resource_size_t extslotsize = tbus->ext_slot_size;
	resource_size_t slotaddr;
	resource_size_t extslotaddr;
	resource_size_t devsize;
	void __iomem *module;
	struct tc_dev *tdev;
	int i, slot, err;
	u8 pattern[4];
	long offset;

	for (slot = 0; slot < tbus->num_tcslots; slot++) {
		slotaddr = tbus->slot_base + slot * slotsize;
		extslotaddr = tbus->ext_slot_base + slot * extslotsize;
		module = ioremap_nocache(slotaddr, slotsize);
		BUG_ON(!module);

		offset = TC_OLDCARD;

		err = 0;
		err |= tc_preadb(pattern + 0, module + offset + TC_PATTERN0);
		err |= tc_preadb(pattern + 1, module + offset + TC_PATTERN1);
		err |= tc_preadb(pattern + 2, module + offset + TC_PATTERN2);
		err |= tc_preadb(pattern + 3, module + offset + TC_PATTERN3);
		if (err)
			goto out_err;

		/* No signature at the old offset?  Retry at the new one.  */
		if (pattern[0] != 0x55 || pattern[1] != 0x00 ||
		    pattern[2] != 0xaa || pattern[3] != 0xff) {
			offset = TC_NEWCARD;

			err = 0;
			err |= tc_preadb(pattern + 0,
					 module + offset + TC_PATTERN0);
			err |= tc_preadb(pattern + 1,
					 module + offset + TC_PATTERN1);
			err |= tc_preadb(pattern + 2,
					 module + offset + TC_PATTERN2);
			err |= tc_preadb(pattern + 3,
					 module + offset + TC_PATTERN3);
			if (err)
				goto out_err;
		}

		if (pattern[0] != 0x55 || pattern[1] != 0x00 ||
		    pattern[2] != 0xaa || pattern[3] != 0xff)
			goto out_err;

		/* Found a board, allocate it an entry in the list */
		tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
		if (!tdev) {
			printk(KERN_ERR "tc%x: unable to allocate tc_dev\n",
			       slot);
			goto out_err;
		}
		dev_set_name(&tdev->dev, "tc%x", slot);
		tdev->bus = tbus;
		tdev->dev.parent = &tbus->dev;
		tdev->dev.bus = &tc_bus_type;
		tdev->slot = slot;

		/* ROM stores one ID byte per 32-bit word, hence the 4 * i. */
		for (i = 0; i < 8; i++) {
			tdev->firmware[i] =
				readb(module + offset + TC_FIRM_VER + 4 * i);
			tdev->vendor[i] =
				readb(module + offset + TC_VENDOR + 4 * i);
			tdev->name[i] =
				readb(module + offset + TC_MODULE + 4 * i);
		}
		tdev->firmware[8] = 0;
		tdev->vendor[8] = 0;
		tdev->name[8] = 0;

		pr_info("%s: %s %s %s\n", dev_name(&tdev->dev),
			tdev->vendor, tdev->name, tdev->firmware);

		devsize = readb(module + offset + TC_SLOT_SIZE);
		devsize <<= 22;
		if (devsize <= slotsize) {
			tdev->resource.start = slotaddr;
			tdev->resource.end = slotaddr + devsize - 1;
		} else if (devsize <= extslotsize) {
			tdev->resource.start = extslotaddr;
			tdev->resource.end = extslotaddr + devsize - 1;
		} else {
			/*
			 * resource_size_t may be 64-bit; "%d" is a format/
			 * argument mismatch for it, so cast the MiB counts
			 * to unsigned long and print them with "%lu".
			 */
			printk(KERN_ERR "%s: Cannot provide slot space "
			       "(%luMiB required, up to %luMiB supported)\n",
			       dev_name(&tdev->dev),
			       (unsigned long)(devsize >> 20),
			       (unsigned long)(max(slotsize,
						   extslotsize) >> 20));
			kfree(tdev);
			goto out_err;
		}
		tdev->resource.name = tdev->name;
		tdev->resource.flags = IORESOURCE_MEM;

		tc_device_get_irq(tdev);

		device_register(&tdev->dev);
		list_add_tail(&tdev->node, &tbus->devices);

out_err:
		iounmap(module);
	}
}

/*
 * The main entry.
 */
static int __init tc_init(void)
{
	/* Initialize the TURBOchannel bus */
	if (tc_bus_get_info(&tc_bus))
		return 0;

	INIT_LIST_HEAD(&tc_bus.devices);
	dev_set_name(&tc_bus.dev, "tc");
	device_register(&tc_bus.dev);

	if (tc_bus.info.slot_size) {
		unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;

		pr_info("tc: TURBOchannel rev. %d at %d.%d MHz "
			"(with%s parity)\n", tc_bus.info.revision,
			tc_clock / 10, tc_clock % 10,
			tc_bus.info.parity ? "" : "out");

		tc_bus.resource[0].start = tc_bus.slot_base;
		tc_bus.resource[0].end = tc_bus.slot_base +
					 (tc_bus.info.slot_size << 20) *
					 tc_bus.num_tcslots - 1;
		tc_bus.resource[0].name = tc_bus.name;
		tc_bus.resource[0].flags = IORESOURCE_MEM;
		if (request_resource(&iomem_resource,
				     &tc_bus.resource[0]) < 0) {
			printk(KERN_ERR "tc: Cannot reserve resource\n");
			return 0;
		}
		if (tc_bus.ext_slot_size) {
			tc_bus.resource[1].start = tc_bus.ext_slot_base;
			tc_bus.resource[1].end = tc_bus.ext_slot_base +
						 tc_bus.ext_slot_size *
						 tc_bus.num_tcslots - 1;
			tc_bus.resource[1].name = tc_bus.name;
			tc_bus.resource[1].flags = IORESOURCE_MEM;
			if (request_resource(&iomem_resource,
					     &tc_bus.resource[1]) < 0) {
				printk(KERN_ERR
				       "tc: Cannot reserve resource\n");
				release_resource(&tc_bus.resource[0]);
				return 0;
			}
		}

		tc_bus_add_devices(&tc_bus);
	}

	return 0;
}

subsys_initcall(tc_init);
gpl-2.0
TeamBliss-Devices/android_kernel_samsung_hlte
sound/soc/msm/qdsp6v2/q6core.c
22
7339
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/slab.h> #include <mach/msm_smd.h> #include <mach/qdsp6v2/apr.h> #include <mach/ocmem.h> #include <sound/q6core.h> #define TIMEOUT_MS 1000 /* * AVS bring up in the modem is optimitized for the new * Sub System Restart design and 100 milliseconds timeout * is sufficient to make sure the Q6 will be ready. 
*/ #define Q6_READY_TIMEOUT_MS 100 struct q6core_str { struct apr_svc *core_handle_q; wait_queue_head_t bus_bw_req_wait; u32 bus_bw_resp_received; struct avcs_cmd_rsp_get_low_power_segments_info_t lp_ocm_payload; u32 param; }; static struct q6core_str q6core_lcl; static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv) { uint32_t *payload1; uint32_t nseg; int i, j; if (data == NULL) { pr_err("%s: data argument is null\n", __func__); return -EINVAL; } pr_debug("core msg: payload len = %u, apr resp opcode = 0x%X\n", data->payload_size, data->opcode); switch (data->opcode) { case APR_BASIC_RSP_RESULT:{ if (data->payload_size == 0) { pr_err("%s: APR_BASIC_RSP_RESULT No Payload ", __func__); return 0; } payload1 = data->payload; switch (payload1[0]) { case AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO: pr_info("%s: Cmd = AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO status[0x%x]\n", __func__, payload1[1]); break; default: pr_err("Invalid cmd rsp[0x%x][0x%x]\n", payload1[0], payload1[1]); break; } break; } case AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO: payload1 = data->payload; pr_info("%s: cmd = AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO num_segments = 0x%x\n", __func__, payload1[0]); nseg = payload1[0]; q6core_lcl.lp_ocm_payload.num_segments = nseg; q6core_lcl.lp_ocm_payload.bandwidth = payload1[1]; for (i = 0, j = 2; i < nseg; i++) { q6core_lcl.lp_ocm_payload.mem_segment[i].type = (payload1[j] & 0xffff); q6core_lcl.lp_ocm_payload.mem_segment[i].category = ((payload1[j++] >> 16) & 0xffff); q6core_lcl.lp_ocm_payload.mem_segment[i].size = payload1[j++]; q6core_lcl.lp_ocm_payload. mem_segment[i].start_address_lsw = payload1[j++]; q6core_lcl.lp_ocm_payload. 
mem_segment[i].start_address_msw = payload1[j++]; } q6core_lcl.bus_bw_resp_received = 1; wake_up(&q6core_lcl.bus_bw_req_wait); break; case RESET_EVENTS:{ pr_debug("Reset event received in Core service"); apr_reset(q6core_lcl.core_handle_q); q6core_lcl.core_handle_q = NULL; break; } case AVCS_CMDRSP_ADSP_EVENT_GET_STATE: payload1 = data->payload; q6core_lcl.param = payload1[0]; pr_debug("%s: Received ADSP get state response 0x%x\n", __func__, q6core_lcl.param); /* ensure .param is updated prior to .bus_bw_resp_received */ wmb(); q6core_lcl.bus_bw_resp_received = 1; wake_up(&q6core_lcl.bus_bw_req_wait); break; default: pr_err("Message id from adsp core svc: %d\n", data->opcode); break; } return 0; } void ocm_core_open(void) { if (q6core_lcl.core_handle_q == NULL) q6core_lcl.core_handle_q = apr_register("ADSP", "CORE", aprv2_core_fn_q, 0xFFFFFFFF, NULL); pr_debug("Open_q %p\n", q6core_lcl.core_handle_q); if (q6core_lcl.core_handle_q == NULL) pr_err("%s: Unable to register CORE\n", __func__); } uint32_t core_set_dolby_manufacturer_id(int manufacturer_id) { struct adsp_dolby_manufacturer_id payload; int rc = 0; pr_debug("%s manufacturer_id :%d\n", __func__, manufacturer_id); ocm_core_open(); if (q6core_lcl.core_handle_q) { payload.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); payload.hdr.pkt_size = sizeof(struct adsp_dolby_manufacturer_id); payload.hdr.src_port = 0; payload.hdr.dest_port = 0; payload.hdr.token = 0; payload.hdr.opcode = ADSP_CMD_SET_DOLBY_MANUFACTURER_ID; payload.manufacturer_id = manufacturer_id; pr_debug("Send Dolby security opcode=%x manufacturer ID = %d\n", payload.hdr.opcode, payload.manufacturer_id); rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&payload); if (rc < 0) pr_err("%s: SET_DOLBY_MANUFACTURER_ID failed op[0x%x]rc[%d]\n", __func__, payload.hdr.opcode, rc); } return rc; } int core_get_low_power_segments( struct avcs_cmd_rsp_get_low_power_segments_info_t **lp_memseg) { struct 
avcs_cmd_get_low_power_segments_info lp_ocm_cmd; int ret = 0; pr_debug("%s: ", __func__); ocm_core_open(); if (q6core_lcl.core_handle_q == NULL) { pr_info("%s: apr registration for CORE failed\n", __func__); return -ENODEV; } lp_ocm_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); lp_ocm_cmd.hdr.pkt_size = sizeof(struct avcs_cmd_get_low_power_segments_info); lp_ocm_cmd.hdr.src_port = 0; lp_ocm_cmd.hdr.dest_port = 0; lp_ocm_cmd.hdr.token = 0; lp_ocm_cmd.hdr.opcode = AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO; ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &lp_ocm_cmd); if (ret < 0) { pr_err("%s: CORE low power segment request failed\n", __func__); goto fail_cmd; } ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait, (q6core_lcl.bus_bw_resp_received == 1), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout for GET_LOW_POWER_SEGMENTS\n", __func__); ret = -ETIME; goto fail_cmd; } *lp_memseg = &q6core_lcl.lp_ocm_payload; return 0; fail_cmd: return ret; } bool q6core_is_adsp_ready(void) { int rc; bool ret = false; struct apr_hdr hdr; pr_debug("%s: enter\n", __func__); memset(&hdr, 0, sizeof(hdr)); hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0); hdr.opcode = AVCS_CMD_ADSP_EVENT_GET_STATE; ocm_core_open(); q6core_lcl.bus_bw_resp_received = 0; rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&hdr); if (rc < 0) { pr_err("%s: Get ADSP state APR packet send event\n", __func__); goto bail; } rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait, (q6core_lcl.bus_bw_resp_received == 1), msecs_to_jiffies(Q6_READY_TIMEOUT_MS)); if (rc > 0 && q6core_lcl.bus_bw_resp_received) { /* ensure to read updated param by callback thread */ rmb(); ret = !!q6core_lcl.param; } bail: pr_debug("%s: leave, rc %d, adsp ready %d\n", __func__, rc, ret); return ret; } static int __init core_init(void) { 
init_waitqueue_head(&q6core_lcl.bus_bw_req_wait); q6core_lcl.bus_bw_resp_received = 0; q6core_lcl.core_handle_q = NULL; return 0; } module_init(core_init); static void __exit core_exit(void) { } module_exit(core_exit); MODULE_DESCRIPTION("ADSP core driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
smarkwell/asuswrt-merlin
release/src/router/netatalk-3.0.5/libatalk/cnid/cnid_init.c
22
1826
/*
 * Copyright (c) 2003 the Netatalk Team
 * Copyright (c) 2003 Rafal Lewczuk <rlewczuk@pronet.pl>
 *
 * This program is free software; you can redistribute and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation version 2 of the License or later
 * version if explicitly stated by any of above copyright holders.
 */

/*
 * This file contains initialization stuff for CNID backends.
 * Currently it only employs static bindings.
 * No plans for dynamically loaded CNID backends here (temporary).
 * Maybe somewhere in the future.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif /* HAVE_CONFIG_H */

#include <atalk/cnid.h>
#include <atalk/list.h>
#include <atalk/logger.h>
#include <stdlib.h>

/*
 * Each backend compiled into this build exports a module descriptor;
 * the matching CNID_BACKEND_* macro is defined by configure.
 */
#ifdef CNID_BACKEND_DB3
extern struct _cnid_module cnid_db3_module;
#endif
#ifdef CNID_BACKEND_HASH
extern struct _cnid_module cnid_hash_module;
#endif
#ifdef CNID_BACKEND_LAST
extern struct _cnid_module cnid_last_module;
#endif
#ifdef CNID_BACKEND_MTAB
extern struct _cnid_module cnid_mtab_module;
#endif
#ifdef CNID_BACKEND_CDB
extern struct _cnid_module cnid_cdb_module;
#endif
#ifdef CNID_BACKEND_DBD
extern struct _cnid_module cnid_dbd_module;
#endif
#ifdef CNID_BACKEND_TDB
extern struct _cnid_module cnid_tdb_module;
#endif

/*
 * Register every statically linked CNID backend with the CNID core.
 * Called once at startup before any volume opens a CNID database.
 */
void cnid_init(void)
{
#ifdef CNID_BACKEND_DB3
    cnid_register(&cnid_db3_module);
#endif
#ifdef CNID_BACKEND_HASH
    cnid_register(&cnid_hash_module);
#endif
#ifdef CNID_BACKEND_LAST
    cnid_register(&cnid_last_module);
#endif
#ifdef CNID_BACKEND_MTAB
    cnid_register(&cnid_mtab_module);
#endif
#ifdef CNID_BACKEND_CDB
    cnid_register(&cnid_cdb_module);
#endif
#ifdef CNID_BACKEND_DBD
    cnid_register(&cnid_dbd_module);
#endif
#ifdef CNID_BACKEND_TDB
    cnid_register(&cnid_tdb_module);
#endif
}
gpl-2.0
BTDC/coreboot
src/vendorcode/amd/agesa/f10/Proc/HT/Fam10/htNbCoherentFam10.c
22
5607
/** * @file * * Coherent Family 10h Routines. * * Coherent feature Northbridge implementation specific to Family 10h processors. * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: HyperTransport * @e \$Revision: 44323 $ @e \$Date: 2010-12-22 01:24:58 -0700 (Wed, 22 Dec 2010) $ * */ /* ***************************************************************************** * * Copyright (c) 2011, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * *************************************************************************** * */ /* *---------------------------------------------------------------------------- * MODULES USED * *---------------------------------------------------------------------------- */ #include "AGESA.h" #include "amdlib.h" #include "Ids.h" #include "Topology.h" #include "htFeat.h" #include "htNb.h" #include "htNbHardwareFam10.h" #include "htNbCoherentFam10.h" #include "Filecode.h" #define FILECODE PROC_HT_FAM10_HTNBCOHERENTFAM10_FILECODE /*---------------------------------------------------------------------------- * DEFINITIONS AND MACROS * *---------------------------------------------------------------------------- */ /*----------------------------------------------------------------------------------------*/ /** * Return whether the current configuration exceeds the capability. * * @HtNbMethod{::F_IS_EXCEEDED_CAPABLE}. * * Get Node capability and update the minimum supported system capability. * * @param[in] Node the Node * @param[in] State sysMpCap (updated) and NodesDiscovered * @param[in] Nb this northbridge * * @retval TRUE system is not capable of current config. * @retval FALSE system is capable of current config. 
*/
BOOLEAN
Fam10IsExceededCapable (
  IN       UINT8        Node,
  IN       STATE_DATA   *State,
  IN       NORTHBRIDGE  *Nb
  )
{
  UINT32 Temp;
  UINT8 MaxNodes;
  PCI_ADDR Reg;

  ASSERT (Node < MAX_NODES);

  // Read this node's MP capability field, F3xE8 bits [18:16].
  Reg.AddressValue = MAKE_SBDFO (MakePciSegmentFromNode (Node),
                                 MakePciBusFromNode (Node),
                                 MakePciDeviceFromNode (Node),
                                 CPU_NB_FUNC_03,
                                 REG_NB_CAPABILITY_3XE8);
  LibAmdPciReadBits (Reg, 18, 16, &Temp, Nb->ConfigHandle);
  if (Temp != 0) {
    MaxNodes = (UINT8) (1 << (~Temp & 0x3));    // That is, 1, 2, 4, or 8
  } else {
    // A zero field means no encoded limit: assume the 8-node maximum.
    MaxNodes = 8;
  }
  // Lower the running system-wide capability to this node's limit.
  if (State->SysMpCap > MaxNodes) {
    State->SysMpCap = MaxNodes;
  }
  // Note since sysMpCap is one based and NodesDiscovered is zero based, equal returns true
  //
  return ((BOOLEAN) (State->SysMpCap <= State->NodesDiscovered));
}

/*----------------------------------------------------------------------------------------*/
/**
 * Stop a link, so that it is isolated from a connected device.
 *
 * @HtNbMethod{::F_STOP_LINK}.
 *
 * Use is for fatal incompatible configurations, or for user interface
 * request to power off a link (IgnoreLink, SkipRegang).
 * Set ConnDly to make the power effective at the warm reset.
 * Set XMT and RCV off.
 *
 * @param[in] Node   the node to stop a link on.
 * @param[in] Link   the link to stop.
 * @param[in] State  access to special routine for writing link control register
 * @param[in] Nb     this northbridge.
 */
VOID
Fam10StopLink (
  IN       UINT8        Node,
  IN       UINT8        Link,
  IN       STATE_DATA   *State,
  IN       NORTHBRIDGE  *Nb
  )
{
  UINT32 Temp;
  PCI_ADDR Reg;

  // Set ConnDly
  Reg.AddressValue = MAKE_SBDFO (MakePciSegmentFromNode (Node),
                                 MakePciBusFromNode (Node),
                                 MakePciDeviceFromNode (Node),
                                 CPU_HTNB_FUNC_00,
                                 REG_LINK_GLOBAL_EXT_CONTROL_0x16C);
  Temp = 1;
  LibAmdPciWriteBits (Reg, 8, 8, &Temp, Nb->ConfigHandle);

  // Set TransOff and EndOfChain
  Reg = Nb->MakeLinkBase (Node, Link, Nb);
  Reg.Address.Register += HTHOST_LINK_CONTROL_REG;
  Temp = 3;
  State->HtFeatures->SetHtControlRegisterBits (Reg, 7, 6, &Temp, State);
}
gpl-2.0
yubo/linux-2-6-32-220-23-1-el6
arch/arm/kernel/xscale-cp0.c
534
4032
/* * linux/arch/arm/kernel/xscale-cp0.c * * XScale DSP and iWMMXt coprocessor context switching and handling * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/io.h> #include <asm/thread_notify.h> static inline void dsp_save_state(u32 *state) { __asm__ __volatile__ ( "mrrc p0, 0, %0, %1, c0\n" : "=r" (state[0]), "=r" (state[1])); } static inline void dsp_load_state(u32 *state) { __asm__ __volatile__ ( "mcrr p0, 0, %0, %1, c0\n" : : "r" (state[0]), "r" (state[1])); } static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t) { struct thread_info *thread = t; switch (cmd) { case THREAD_NOTIFY_FLUSH: thread->cpu_context.extra[0] = 0; thread->cpu_context.extra[1] = 0; break; case THREAD_NOTIFY_SWITCH: dsp_save_state(current_thread_info()->cpu_context.extra); dsp_load_state(thread->cpu_context.extra); break; } return NOTIFY_DONE; } static struct notifier_block dsp_notifier_block = { .notifier_call = dsp_do, }; #ifdef CONFIG_IWMMXT static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t) { struct thread_info *thread = t; switch (cmd) { case THREAD_NOTIFY_FLUSH: /* * flush_thread() zeroes thread->fpstate, so no need * to do anything here. * * FALLTHROUGH: Ensure we don't try to overwrite our newly * initialised state information on the first fault. 
*/ case THREAD_NOTIFY_RELEASE: iwmmxt_task_release(thread); break; case THREAD_NOTIFY_SWITCH: iwmmxt_task_switch(thread); break; } return NOTIFY_DONE; } static struct notifier_block iwmmxt_notifier_block = { .notifier_call = iwmmxt_do, }; #endif static u32 __init xscale_cp_access_read(void) { u32 value; __asm__ __volatile__ ( "mrc p15, 0, %0, c15, c1, 0\n\t" : "=r" (value)); return value; } static void __init xscale_cp_access_write(u32 value) { u32 temp; __asm__ __volatile__ ( "mcr p15, 0, %1, c15, c1, 0\n\t" "mrc p15, 0, %0, c15, c1, 0\n\t" "mov %0, %0\n\t" "sub pc, pc, #4\n\t" : "=r" (temp) : "r" (value)); } /* * Detect whether we have a MAC coprocessor (40 bit register) or an * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000 * into a coprocessor register and reading it back, and checking * whether the upper word survived intact. */ static int __init cpu_has_iwmmxt(void) { u32 lo; u32 hi; /* * This sequence is interpreted by the DSP coprocessor as: * mar acc0, %2, %3 * mra %0, %1, acc0 * * And by the iWMMXt coprocessor as: * tmcrr wR0, %2, %3 * tmrrc %0, %1, wR0 */ __asm__ __volatile__ ( "mcrr p0, 0, %2, %3, c0\n" "mrrc p0, 0, %0, %1, c0\n" : "=r" (lo), "=r" (hi) : "r" (0), "r" (0x100)); return !!hi; } /* * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy * switch code handle iWMMXt context switching. If on the other * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled * all the time, and save/restore acc0 on context switch in non-lazy * fashion. 
*/ static int __init xscale_cp0_init(void) { u32 cp_access; cp_access = xscale_cp_access_read() & ~3; xscale_cp_access_write(cp_access | 1); if (cpu_has_iwmmxt()) { #ifndef CONFIG_IWMMXT printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor " "detected, but kernel support is missing.\n"); #else printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n"); elf_hwcap |= HWCAP_IWMMXT; thread_register_notifier(&iwmmxt_notifier_block); #endif } else { printk(KERN_INFO "XScale DSP coprocessor detected.\n"); thread_register_notifier(&dsp_notifier_block); cp_access |= 1; } xscale_cp_access_write(cp_access); return 0; } late_initcall(xscale_cp0_init);
gpl-2.0
pedestre/Kernel-Apolo-JB-4.1.2
fs/notify/fanotify/fanotify_user.c
534
21979
#include <linux/fanotify.h> #include <linux/fcntl.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/fsnotify_backend.h> #include <linux/init.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/poll.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> #include <asm/ioctls.h> #define FANOTIFY_DEFAULT_MAX_EVENTS 16384 #define FANOTIFY_DEFAULT_MAX_MARKS 8192 #define FANOTIFY_DEFAULT_MAX_LISTENERS 128 extern const struct fsnotify_ops fanotify_fsnotify_ops; static struct kmem_cache *fanotify_mark_cache __read_mostly; static struct kmem_cache *fanotify_response_event_cache __read_mostly; struct fanotify_response_event { struct list_head list; __s32 fd; struct fsnotify_event *event; }; /* * Get an fsnotify notification event if one exists and is small * enough to fit in "count". Return an error pointer if the count * is not large enough. * * Called with the group->notification_mutex held. 
*/ static struct fsnotify_event *get_one_event(struct fsnotify_group *group, size_t count) { BUG_ON(!mutex_is_locked(&group->notification_mutex)); pr_debug("%s: group=%p count=%zd\n", __func__, group, count); if (fsnotify_notify_queue_is_empty(group)) return NULL; if (FAN_EVENT_METADATA_LEN > count) return ERR_PTR(-EINVAL); /* held the notification_mutex the whole time, so this is the * same event we peeked above */ return fsnotify_remove_notify_event(group); } static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event) { int client_fd; struct dentry *dentry; struct vfsmount *mnt; struct file *new_file; pr_debug("%s: group=%p event=%p\n", __func__, group, event); client_fd = get_unused_fd(); if (client_fd < 0) return client_fd; if (event->data_type != FSNOTIFY_EVENT_PATH) { WARN_ON(1); put_unused_fd(client_fd); return -EINVAL; } /* * we need a new file handle for the userspace program so it can read even if it was * originally opened O_WRONLY. */ dentry = dget(event->path.dentry); mnt = mntget(event->path.mnt); /* it's possible this event was an overflow event. in that case dentry and mnt * are NULL; That's fine, just don't call dentry open */ if (dentry && mnt) new_file = dentry_open(dentry, mnt, group->fanotify_data.f_flags | FMODE_NONOTIFY, current_cred()); else new_file = ERR_PTR(-EOVERFLOW); if (IS_ERR(new_file)) { /* * we still send an event even if we can't open the file. this * can happen when say tasks are gone and we try to open their * /proc files or we try to open a WRONLY file like in sysfs * we just send the errno to userspace since there isn't much * else we can do. 
*/ put_unused_fd(client_fd); client_fd = PTR_ERR(new_file); } else { fd_install(client_fd, new_file); } return client_fd; } static int fill_event_metadata(struct fsnotify_group *group, struct fanotify_event_metadata *metadata, struct fsnotify_event *event) { int ret = 0; pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, group, metadata, event); metadata->event_len = FAN_EVENT_METADATA_LEN; metadata->metadata_len = FAN_EVENT_METADATA_LEN; metadata->vers = FANOTIFY_METADATA_VERSION; metadata->reserved = 0; metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; metadata->pid = pid_vnr(event->tgid); if (unlikely(event->mask & FAN_Q_OVERFLOW)) metadata->fd = FAN_NOFD; else { metadata->fd = create_fd(group, event); if (metadata->fd < 0) ret = metadata->fd; } return ret; } #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group, __s32 fd) { struct fanotify_response_event *re, *return_re = NULL; mutex_lock(&group->fanotify_data.access_mutex); list_for_each_entry(re, &group->fanotify_data.access_list, list) { if (re->fd != fd) continue; list_del_init(&re->list); return_re = re; break; } mutex_unlock(&group->fanotify_data.access_mutex); pr_debug("%s: found return_re=%p\n", __func__, return_re); return return_re; } static int process_access_response(struct fsnotify_group *group, struct fanotify_response *response_struct) { struct fanotify_response_event *re; __s32 fd = response_struct->fd; __u32 response = response_struct->response; pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group, fd, response); /* * make sure the response is valid, if invalid we do nothing and either * userspace can send a valid response or we will clean it up after the * timeout */ switch (response) { case FAN_ALLOW: case FAN_DENY: break; default: return -EINVAL; } if (fd < 0) return -EINVAL; re = dequeue_re(group, fd); if (!re) return -ENOENT; re->event->response = response; wake_up(&group->fanotify_data.access_waitq); 
kmem_cache_free(fanotify_response_event_cache, re); return 0; } static int prepare_for_access_response(struct fsnotify_group *group, struct fsnotify_event *event, __s32 fd) { struct fanotify_response_event *re; if (!(event->mask & FAN_ALL_PERM_EVENTS)) return 0; re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL); if (!re) return -ENOMEM; re->event = event; re->fd = fd; mutex_lock(&group->fanotify_data.access_mutex); if (atomic_read(&group->fanotify_data.bypass_perm)) { mutex_unlock(&group->fanotify_data.access_mutex); kmem_cache_free(fanotify_response_event_cache, re); event->response = FAN_ALLOW; return 0; } list_add_tail(&re->list, &group->fanotify_data.access_list); mutex_unlock(&group->fanotify_data.access_mutex); return 0; } static void remove_access_response(struct fsnotify_group *group, struct fsnotify_event *event, __s32 fd) { struct fanotify_response_event *re; if (!(event->mask & FAN_ALL_PERM_EVENTS)) return; re = dequeue_re(group, fd); if (!re) return; BUG_ON(re->event != event); kmem_cache_free(fanotify_response_event_cache, re); return; } #else static int prepare_for_access_response(struct fsnotify_group *group, struct fsnotify_event *event, __s32 fd) { return 0; } static void remove_access_response(struct fsnotify_group *group, struct fsnotify_event *event, __s32 fd) { return; } #endif static ssize_t copy_event_to_user(struct fsnotify_group *group, struct fsnotify_event *event, char __user *buf) { struct fanotify_event_metadata fanotify_event_metadata; int fd, ret; pr_debug("%s: group=%p event=%p\n", __func__, group, event); ret = fill_event_metadata(group, &fanotify_event_metadata, event); if (ret < 0) goto out; fd = fanotify_event_metadata.fd; ret = prepare_for_access_response(group, event, fd); if (ret) goto out_close_fd; ret = -EFAULT; if (copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len)) goto out_kill_access_response; return fanotify_event_metadata.event_len; out_kill_access_response: 
remove_access_response(group, event, fd); out_close_fd: if (fd != FAN_NOFD) sys_close(fd); out: #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS if (event->mask & FAN_ALL_PERM_EVENTS) { event->response = FAN_DENY; wake_up(&group->fanotify_data.access_waitq); } #endif return ret; } /* intofiy userspace file descriptor functions */ static unsigned int fanotify_poll(struct file *file, poll_table *wait) { struct fsnotify_group *group = file->private_data; int ret = 0; poll_wait(file, &group->notification_waitq, wait); mutex_lock(&group->notification_mutex); if (!fsnotify_notify_queue_is_empty(group)) ret = POLLIN | POLLRDNORM; mutex_unlock(&group->notification_mutex); return ret; } static ssize_t fanotify_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct fsnotify_group *group; struct fsnotify_event *kevent; char __user *start; int ret; DEFINE_WAIT(wait); start = buf; group = file->private_data; pr_debug("%s: group=%p\n", __func__, group); while (1) { prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE); mutex_lock(&group->notification_mutex); kevent = get_one_event(group, count); mutex_unlock(&group->notification_mutex); if (kevent) { ret = PTR_ERR(kevent); if (IS_ERR(kevent)) break; ret = copy_event_to_user(group, kevent, buf); fsnotify_put_event(kevent); if (ret < 0) break; buf += ret; count -= ret; continue; } ret = -EAGAIN; if (file->f_flags & O_NONBLOCK) break; ret = -ERESTARTSYS; if (signal_pending(current)) break; if (start != buf) break; schedule(); } finish_wait(&group->notification_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; return ret; } static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS struct fanotify_response response = { .fd = -1, .response = -1 }; struct fsnotify_group *group; int ret; group = file->private_data; if (count > sizeof(response)) count = sizeof(response); pr_debug("%s: group=%p 
count=%zu\n", __func__, group, count); if (copy_from_user(&response, buf, count)) return -EFAULT; ret = process_access_response(group, &response); if (ret < 0) count = ret; return count; #else return -EINVAL; #endif } static int fanotify_release(struct inode *ignored, struct file *file) { struct fsnotify_group *group = file->private_data; #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS struct fanotify_response_event *re, *lre; mutex_lock(&group->fanotify_data.access_mutex); atomic_inc(&group->fanotify_data.bypass_perm); list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, re, re->event); list_del_init(&re->list); re->event->response = FAN_ALLOW; kmem_cache_free(fanotify_response_event_cache, re); } mutex_unlock(&group->fanotify_data.access_mutex); wake_up(&group->fanotify_data.access_waitq); #endif /* matches the fanotify_init->fsnotify_alloc_group */ fsnotify_put_group(group); return 0; } static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fsnotify_group *group; struct fsnotify_event_holder *holder; void __user *p; int ret = -ENOTTY; size_t send_len = 0; group = file->private_data; p = (void __user *) arg; switch (cmd) { case FIONREAD: mutex_lock(&group->notification_mutex); list_for_each_entry(holder, &group->notification_list, event_list) send_len += FAN_EVENT_METADATA_LEN; mutex_unlock(&group->notification_mutex); ret = put_user(send_len, (int __user *) p); break; } return ret; } static const struct file_operations fanotify_fops = { .poll = fanotify_poll, .read = fanotify_read, .write = fanotify_write, .fasync = NULL, .release = fanotify_release, .unlocked_ioctl = fanotify_ioctl, .compat_ioctl = fanotify_ioctl, .llseek = noop_llseek, }; static void fanotify_free_mark(struct fsnotify_mark *fsn_mark) { kmem_cache_free(fanotify_mark_cache, fsn_mark); } static int fanotify_find_path(int dfd, const char __user *filename, struct path *path, 
unsigned int flags) { int ret; pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__, dfd, filename, flags); if (filename == NULL) { struct file *file; int fput_needed; ret = -EBADF; file = fget_light(dfd, &fput_needed); if (!file) goto out; ret = -ENOTDIR; if ((flags & FAN_MARK_ONLYDIR) && !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) { fput_light(file, fput_needed); goto out; } *path = file->f_path; path_get(path); fput_light(file, fput_needed); } else { unsigned int lookup_flags = 0; if (!(flags & FAN_MARK_DONT_FOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (flags & FAN_MARK_ONLYDIR) lookup_flags |= LOOKUP_DIRECTORY; ret = user_path_at(dfd, filename, lookup_flags, path); if (ret) goto out; } /* you can only watch an inode if you have read permissions on it */ ret = inode_permission(path->dentry->d_inode, MAY_READ); if (ret) path_put(path); out: return ret; } static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int flags) { __u32 oldmask; spin_lock(&fsn_mark->lock); if (!(flags & FAN_MARK_IGNORED_MASK)) { oldmask = fsn_mark->mask; fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask)); } else { oldmask = fsn_mark->ignored_mask; fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask)); } spin_unlock(&fsn_mark->lock); if (!(oldmask & ~mask)) fsnotify_destroy_mark(fsn_mark); return mask & oldmask; } static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt, __u32 mask, unsigned int flags) { struct fsnotify_mark *fsn_mark = NULL; __u32 removed; fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); if (!fsn_mark) return -ENOENT; removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags); fsnotify_put_mark(fsn_mark); if (removed & mnt->mnt_fsnotify_mask) fsnotify_recalc_vfsmount_mask(mnt); return 0; } static int fanotify_remove_inode_mark(struct fsnotify_group *group, struct inode *inode, __u32 mask, unsigned int flags) { struct fsnotify_mark *fsn_mark = NULL; __u32 removed; 
fsn_mark = fsnotify_find_inode_mark(group, inode); if (!fsn_mark) return -ENOENT; removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags); /* matches the fsnotify_find_inode_mark() */ fsnotify_put_mark(fsn_mark); if (removed & inode->i_fsnotify_mask) fsnotify_recalc_inode_mask(inode); return 0; } static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, __u32 mask, unsigned int flags) { __u32 oldmask = -1; spin_lock(&fsn_mark->lock); if (!(flags & FAN_MARK_IGNORED_MASK)) { oldmask = fsn_mark->mask; fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask)); } else { __u32 tmask = fsn_mark->ignored_mask | mask; fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask); if (flags & FAN_MARK_IGNORED_SURV_MODIFY) fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY; } if (!(flags & FAN_MARK_ONDIR)) { __u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR; fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask); } spin_unlock(&fsn_mark->lock); return mask & ~oldmask; } static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt, __u32 mask, unsigned int flags) { struct fsnotify_mark *fsn_mark; __u32 added; int ret = 0; fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); if (!fsn_mark) { if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) return -ENOSPC; fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); if (!fsn_mark) return -ENOMEM; fsnotify_init_mark(fsn_mark, fanotify_free_mark); ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0); if (ret) goto err; } added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); if (added & ~mnt->mnt_fsnotify_mask) fsnotify_recalc_vfsmount_mask(mnt); err: fsnotify_put_mark(fsn_mark); return ret; } static int fanotify_add_inode_mark(struct fsnotify_group *group, struct inode *inode, __u32 mask, unsigned int flags) { struct fsnotify_mark *fsn_mark; __u32 added; int ret = 0; pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); /* * If some other task has 
this inode open for write we should not add * an ignored mark, unless that ignored mark is supposed to survive * modification changes anyway. */ if ((flags & FAN_MARK_IGNORED_MASK) && !(flags & FAN_MARK_IGNORED_SURV_MODIFY) && (atomic_read(&inode->i_writecount) > 0)) return 0; fsn_mark = fsnotify_find_inode_mark(group, inode); if (!fsn_mark) { if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) return -ENOSPC; fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); if (!fsn_mark) return -ENOMEM; fsnotify_init_mark(fsn_mark, fanotify_free_mark); ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0); if (ret) goto err; } added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); if (added & ~inode->i_fsnotify_mask) fsnotify_recalc_inode_mask(inode); err: fsnotify_put_mark(fsn_mark); return ret; } /* fanotify syscalls */ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) { struct fsnotify_group *group; int f_flags, fd; struct user_struct *user; pr_debug("%s: flags=%d event_f_flags=%d\n", __func__, flags, event_f_flags); if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (flags & ~FAN_ALL_INIT_FLAGS) return -EINVAL; user = get_current_user(); if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) { free_uid(user); return -EMFILE; } f_flags = O_RDWR | FMODE_NONOTIFY; if (flags & FAN_CLOEXEC) f_flags |= O_CLOEXEC; if (flags & FAN_NONBLOCK) f_flags |= O_NONBLOCK; /* fsnotify_alloc_group takes a ref. 
Dropped in fanotify_release */ group = fsnotify_alloc_group(&fanotify_fsnotify_ops); if (IS_ERR(group)) { free_uid(user); return PTR_ERR(group); } group->fanotify_data.user = user; atomic_inc(&user->fanotify_listeners); group->fanotify_data.f_flags = event_f_flags; #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS mutex_init(&group->fanotify_data.access_mutex); init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); atomic_set(&group->fanotify_data.bypass_perm, 0); #endif switch (flags & FAN_ALL_CLASS_BITS) { case FAN_CLASS_NOTIF: group->priority = FS_PRIO_0; break; case FAN_CLASS_CONTENT: group->priority = FS_PRIO_1; break; case FAN_CLASS_PRE_CONTENT: group->priority = FS_PRIO_2; break; default: fd = -EINVAL; goto out_put_group; } if (flags & FAN_UNLIMITED_QUEUE) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_put_group; group->max_events = UINT_MAX; } else { group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS; } if (flags & FAN_UNLIMITED_MARKS) { fd = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out_put_group; group->fanotify_data.max_marks = UINT_MAX; } else { group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS; } fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); if (fd < 0) goto out_put_group; return fd; out_put_group: fsnotify_put_group(group); return fd; } SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags, __u64 mask, int dfd, const char __user * pathname) { struct inode *inode = NULL; struct vfsmount *mnt = NULL; struct fsnotify_group *group; struct file *filp; struct path path; int ret, fput_needed; pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", __func__, fanotify_fd, flags, dfd, pathname, mask); /* we only use the lower 32 bits as of right now. 
*/ if (mask & ((__u64)0xffffffff << 32)) return -EINVAL; if (flags & ~FAN_ALL_MARK_FLAGS) return -EINVAL; switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { case FAN_MARK_ADD: /* fallthrough */ case FAN_MARK_REMOVE: if (!mask) return -EINVAL; case FAN_MARK_FLUSH: break; default: return -EINVAL; } if (mask & FAN_ONDIR) { flags |= FAN_MARK_ONDIR; mask &= ~FAN_ONDIR; } #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD)) #else if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD)) #endif return -EINVAL; filp = fget_light(fanotify_fd, &fput_needed); if (unlikely(!filp)) return -EBADF; /* verify that this is indeed an fanotify instance */ ret = -EINVAL; if (unlikely(filp->f_op != &fanotify_fops)) goto fput_and_out; group = filp->private_data; /* * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not * allowed to set permissions events. */ ret = -EINVAL; if (mask & FAN_ALL_PERM_EVENTS && group->priority == FS_PRIO_0) goto fput_and_out; ret = fanotify_find_path(dfd, pathname, &path, flags); if (ret) goto fput_and_out; /* inode held in place by reference to path; group by fget on fd */ if (!(flags & FAN_MARK_MOUNT)) inode = path.dentry->d_inode; else mnt = path.mnt; /* create/update an inode mark */ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { case FAN_MARK_ADD: if (flags & FAN_MARK_MOUNT) ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags); else ret = fanotify_add_inode_mark(group, inode, mask, flags); break; case FAN_MARK_REMOVE: if (flags & FAN_MARK_MOUNT) ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags); else ret = fanotify_remove_inode_mark(group, inode, mask, flags); break; case FAN_MARK_FLUSH: if (flags & FAN_MARK_MOUNT) fsnotify_clear_vfsmount_marks_by_group(group); else fsnotify_clear_inode_marks_by_group(group); break; default: ret = -EINVAL; } path_put(&path); fput_and_out: fput_light(filp, fput_needed); return ret; } #ifdef 
CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask, long dfd, long pathname) { return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags, mask, (int) dfd, (const char __user *) pathname); } SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark); #endif /* * fanotify_user_setup - Our initialization function. Note that we cannot return * error because we have compiled-in VFS hooks. So an (unlikely) failure here * must result in panic(). */ static int __init fanotify_user_setup(void) { fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC); fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event, SLAB_PANIC); return 0; } device_initcall(fanotify_user_setup);
gpl-2.0
lizan/isw11sc-kernel
drivers/usb/host/ehci-w90x900.c
1558
4003
/* * linux/driver/usb/host/ehci-w90x900.c * * Copyright (c) 2008 Nuvoton technology corporation. * * Wan ZongShun <mcuos.com@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation;version 2 of the License. * */ #include <linux/platform_device.h> /*ebable phy0 and phy1 for w90p910*/ #define ENPHY (0x01<<8) #define PHY0_CTR (0xA4) #define PHY1_CTR (0xA8) static int __devinit usb_w90x900_probe(const struct hc_driver *driver, struct platform_device *pdev) { struct usb_hcd *hcd; struct ehci_hcd *ehci; struct resource *res; int retval = 0, irq; unsigned long val; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { retval = -ENXIO; goto err1; } hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI"); if (!hcd) { retval = -ENOMEM; goto err1; } hcd->rsrc_start = res->start; hcd->rsrc_len = res->end - res->start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { retval = -EBUSY; goto err2; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (hcd->regs == NULL) { retval = -EFAULT; goto err3; } ehci = hcd_to_ehci(hcd); ehci->caps = hcd->regs; ehci->regs = hcd->regs + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); /* enable PHY 0,1,the regs only apply to w90p910 * 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of * w90p910 IC relative to ehci->regs. 
*/ val = __raw_readl(ehci->regs+PHY0_CTR); val |= ENPHY; __raw_writel(val, ehci->regs+PHY0_CTR); val = __raw_readl(ehci->regs+PHY1_CTR); val |= ENPHY; __raw_writel(val, ehci->regs+PHY1_CTR); ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); ehci->sbrn = 0x20; irq = platform_get_irq(pdev, 0); if (irq < 0) goto err4; retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval != 0) goto err4; ehci_writel(ehci, 1, &ehci->regs->configured_flag); return retval; err4: iounmap(hcd->regs); err3: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); err2: usb_put_hcd(hcd); err1: return retval; } static void usb_w90x900_remove(struct usb_hcd *hcd, struct platform_device *pdev) { usb_remove_hcd(hcd); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); } static const struct hc_driver ehci_w90x900_hc_driver = { .description = hcd_name, .product_desc = "Nuvoton w90x900 EHCI Host Controller", .hcd_priv_size = sizeof(struct ehci_hcd), /* * generic hardware linkage */ .irq = ehci_irq, .flags = HCD_USB2|HCD_MEMORY, /* * basic lifecycle operations */ .reset = ehci_init, .start = ehci_run, .stop = ehci_stop, .shutdown = ehci_shutdown, /* * managing i/o requests and associated device resources */ .urb_enqueue = ehci_urb_enqueue, .urb_dequeue = ehci_urb_dequeue, .endpoint_disable = ehci_endpoint_disable, /* * scheduling support */ .get_frame_number = ehci_get_frame, /* * root hub support */ .hub_status_data = ehci_hub_status_data, .hub_control = ehci_hub_control, #ifdef CONFIG_PM .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, #endif .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, }; static int __devinit ehci_w90x900_probe(struct platform_device *pdev) { if (usb_disabled()) return -ENODEV; return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev); } static int __devexit ehci_w90x900_remove(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); usb_w90x900_remove(hcd, 
pdev); return 0; } static struct platform_driver ehci_hcd_w90x900_driver = { .probe = ehci_w90x900_probe, .remove = __devexit_p(ehci_w90x900_remove), .driver = { .name = "w90x900-ehci", .owner = THIS_MODULE, }, }; MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); MODULE_DESCRIPTION("w90p910 usb ehci driver!"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:w90p910-ehci");
gpl-2.0
darkspr1te/android_kernel_samsung_msm8916_a5
block/blk-integrity.c
2326
11874
/* * blk-integrity.c - Block layer data integrity extensions * * Copyright (C) 2007, 2008 Oracle Corporation * Written by: Martin K. Petersen <martin.petersen@oracle.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, * USA. * */ #include <linux/blkdev.h> #include <linux/mempool.h> #include <linux/bio.h> #include <linux/scatterlist.h> #include <linux/export.h> #include <linux/slab.h> #include "blk.h" static struct kmem_cache *integrity_cachep; static const char *bi_unsupported_name = "unsupported"; /** * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements * @q: request queue * @bio: bio with integrity metadata attached * * Description: Returns the number of elements required in a * scatterlist corresponding to the integrity metadata in a bio. 
*/ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) { struct bio_vec *iv, *ivprv = NULL; unsigned int segments = 0; unsigned int seg_size = 0; unsigned int i = 0; bio_for_each_integrity_vec(iv, bio, i) { if (ivprv) { if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv)) goto new_segment; if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv)) goto new_segment; if (seg_size + iv->bv_len > queue_max_segment_size(q)) goto new_segment; seg_size += iv->bv_len; } else { new_segment: segments++; seg_size = iv->bv_len; } ivprv = iv; } return segments; } EXPORT_SYMBOL(blk_rq_count_integrity_sg); /** * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist * @q: request queue * @bio: bio with integrity metadata attached * @sglist: target scatterlist * * Description: Map the integrity vectors in request into a * scatterlist. The scatterlist must be big enough to hold all * elements. I.e. sized using blk_rq_count_integrity_sg(). */ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, struct scatterlist *sglist) { struct bio_vec *iv, *ivprv = NULL; struct scatterlist *sg = NULL; unsigned int segments = 0; unsigned int i = 0; bio_for_each_integrity_vec(iv, bio, i) { if (ivprv) { if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv)) goto new_segment; if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv)) goto new_segment; if (sg->length + iv->bv_len > queue_max_segment_size(q)) goto new_segment; sg->length += iv->bv_len; } else { new_segment: if (!sg) sg = sglist; else { sg_unmark_end(sg); sg = sg_next(sg); } sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset); segments++; } ivprv = iv; } if (sg) sg_mark_end(sg); return segments; } EXPORT_SYMBOL(blk_rq_map_integrity_sg); /** * blk_integrity_compare - Compare integrity profile of two disks * @gd1: Disk to compare * @gd2: Disk to compare * * Description: Meta-devices like DM and MD need to verify that all * sub-devices use the same integrity format before advertising to * upper layers that they can send/receive integrity metadata. 
This * function can be used to check whether two gendisk devices have * compatible integrity formats. */ int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2) { struct blk_integrity *b1 = gd1->integrity; struct blk_integrity *b2 = gd2->integrity; if (!b1 && !b2) return 0; if (!b1 || !b2) return -1; if (b1->sector_size != b2->sector_size) { printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__, gd1->disk_name, gd2->disk_name, b1->sector_size, b2->sector_size); return -1; } if (b1->tuple_size != b2->tuple_size) { printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__, gd1->disk_name, gd2->disk_name, b1->tuple_size, b2->tuple_size); return -1; } if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) { printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__, gd1->disk_name, gd2->disk_name, b1->tag_size, b2->tag_size); return -1; } if (strcmp(b1->name, b2->name)) { printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__, gd1->disk_name, gd2->disk_name, b1->name, b2->name); return -1; } return 0; } EXPORT_SYMBOL(blk_integrity_compare); int blk_integrity_merge_rq(struct request_queue *q, struct request *req, struct request *next) { if (blk_integrity_rq(req) != blk_integrity_rq(next)) return -1; if (req->nr_integrity_segments + next->nr_integrity_segments > q->limits.max_integrity_segments) return -1; return 0; } EXPORT_SYMBOL(blk_integrity_merge_rq); int blk_integrity_merge_bio(struct request_queue *q, struct request *req, struct bio *bio) { int nr_integrity_segs; struct bio *next = bio->bi_next; bio->bi_next = NULL; nr_integrity_segs = blk_rq_count_integrity_sg(q, bio); bio->bi_next = next; if (req->nr_integrity_segments + nr_integrity_segs > q->limits.max_integrity_segments) return -1; req->nr_integrity_segments += nr_integrity_segs; return 0; } EXPORT_SYMBOL(blk_integrity_merge_bio); struct integrity_sysfs_entry { struct attribute attr; ssize_t (*show)(struct blk_integrity *, char *); ssize_t (*store)(struct blk_integrity *, const char 
*, size_t); }; static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct blk_integrity *bi = container_of(kobj, struct blk_integrity, kobj); struct integrity_sysfs_entry *entry = container_of(attr, struct integrity_sysfs_entry, attr); return entry->show(bi, page); } static ssize_t integrity_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t count) { struct blk_integrity *bi = container_of(kobj, struct blk_integrity, kobj); struct integrity_sysfs_entry *entry = container_of(attr, struct integrity_sysfs_entry, attr); ssize_t ret = 0; if (entry->store) ret = entry->store(bi, page, count); return ret; } static ssize_t integrity_format_show(struct blk_integrity *bi, char *page) { if (bi != NULL && bi->name != NULL) return sprintf(page, "%s\n", bi->name); else return sprintf(page, "none\n"); } static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page) { if (bi != NULL) return sprintf(page, "%u\n", bi->tag_size); else return sprintf(page, "0\n"); } static ssize_t integrity_read_store(struct blk_integrity *bi, const char *page, size_t count) { char *p = (char *) page; unsigned long val = simple_strtoul(p, &p, 10); if (val) bi->flags |= INTEGRITY_FLAG_READ; else bi->flags &= ~INTEGRITY_FLAG_READ; return count; } static ssize_t integrity_read_show(struct blk_integrity *bi, char *page) { return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_READ) != 0); } static ssize_t integrity_write_store(struct blk_integrity *bi, const char *page, size_t count) { char *p = (char *) page; unsigned long val = simple_strtoul(p, &p, 10); if (val) bi->flags |= INTEGRITY_FLAG_WRITE; else bi->flags &= ~INTEGRITY_FLAG_WRITE; return count; } static ssize_t integrity_write_show(struct blk_integrity *bi, char *page) { return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_WRITE) != 0); } static struct integrity_sysfs_entry integrity_format_entry = { .attr = { .name = "format", .mode = S_IRUGO }, .show = 
integrity_format_show, }; static struct integrity_sysfs_entry integrity_tag_size_entry = { .attr = { .name = "tag_size", .mode = S_IRUGO }, .show = integrity_tag_size_show, }; static struct integrity_sysfs_entry integrity_read_entry = { .attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR }, .show = integrity_read_show, .store = integrity_read_store, }; static struct integrity_sysfs_entry integrity_write_entry = { .attr = { .name = "write_generate", .mode = S_IRUGO | S_IWUSR }, .show = integrity_write_show, .store = integrity_write_store, }; static struct attribute *integrity_attrs[] = { &integrity_format_entry.attr, &integrity_tag_size_entry.attr, &integrity_read_entry.attr, &integrity_write_entry.attr, NULL, }; static const struct sysfs_ops integrity_ops = { .show = &integrity_attr_show, .store = &integrity_attr_store, }; static int __init blk_dev_integrity_init(void) { integrity_cachep = kmem_cache_create("blkdev_integrity", sizeof(struct blk_integrity), 0, SLAB_PANIC, NULL); return 0; } subsys_initcall(blk_dev_integrity_init); static void blk_integrity_release(struct kobject *kobj) { struct blk_integrity *bi = container_of(kobj, struct blk_integrity, kobj); kmem_cache_free(integrity_cachep, bi); } static struct kobj_type integrity_ktype = { .default_attrs = integrity_attrs, .sysfs_ops = &integrity_ops, .release = blk_integrity_release, }; bool blk_integrity_is_initialized(struct gendisk *disk) { struct blk_integrity *bi = blk_get_integrity(disk); return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0); } EXPORT_SYMBOL(blk_integrity_is_initialized); /** * blk_integrity_register - Register a gendisk as being integrity-capable * @disk: struct gendisk pointer to make integrity-aware * @template: optional integrity profile to register * * Description: When a device needs to advertise itself as being able * to send/receive integrity metadata it must use this function to * register the capability with the block layer. 
The template is a * blk_integrity struct with values appropriate for the underlying * hardware. If template is NULL the new profile is allocated but * not filled out. See Documentation/block/data-integrity.txt. */ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) { struct blk_integrity *bi; BUG_ON(disk == NULL); if (disk->integrity == NULL) { bi = kmem_cache_alloc(integrity_cachep, GFP_KERNEL | __GFP_ZERO); if (!bi) return -1; if (kobject_init_and_add(&bi->kobj, &integrity_ktype, &disk_to_dev(disk)->kobj, "%s", "integrity")) { kmem_cache_free(integrity_cachep, bi); return -1; } kobject_uevent(&bi->kobj, KOBJ_ADD); bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE; bi->sector_size = queue_logical_block_size(disk->queue); disk->integrity = bi; } else bi = disk->integrity; /* Use the provided profile as template */ if (template != NULL) { bi->name = template->name; bi->generate_fn = template->generate_fn; bi->verify_fn = template->verify_fn; bi->tuple_size = template->tuple_size; bi->set_tag_fn = template->set_tag_fn; bi->get_tag_fn = template->get_tag_fn; bi->tag_size = template->tag_size; } else bi->name = bi_unsupported_name; disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES; return 0; } EXPORT_SYMBOL(blk_integrity_register); /** * blk_integrity_unregister - Remove block integrity profile * @disk: disk whose integrity profile to deallocate * * Description: This function frees all memory used by the block * integrity profile. To be called at device teardown. */ void blk_integrity_unregister(struct gendisk *disk) { struct blk_integrity *bi; if (!disk || !disk->integrity) return; disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES; bi = disk->integrity; kobject_uevent(&bi->kobj, KOBJ_REMOVE); kobject_del(&bi->kobj); kobject_put(&bi->kobj); disk->integrity = NULL; } EXPORT_SYMBOL(blk_integrity_unregister);
gpl-2.0
lupohirp/GoldenKernel
net/802/fc.c
3094
3360
/* * NET3: Fibre Channel device handling subroutines * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Vineet Abraham <vma@iol.unh.edu> * v 1.0 03/22/99 */ #include <asm/uaccess.h> #include <asm/system.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/fcdevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/net.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <net/arp.h> /* * Put the headers on a Fibre Channel packet. */ static int fc_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { struct fch_hdr *fch; int hdr_len; /* * Add the 802.2 SNAP header if IP as the IPv4 code calls * dev->hard_header directly. */ if (type == ETH_P_IP || type == ETH_P_ARP) { struct fcllc *fcllc; hdr_len = sizeof(struct fch_hdr) + sizeof(struct fcllc); fch = (struct fch_hdr *)skb_push(skb, hdr_len); fcllc = (struct fcllc *)(fch+1); fcllc->dsap = fcllc->ssap = EXTENDED_SAP; fcllc->llc = UI_CMD; fcllc->protid[0] = fcllc->protid[1] = fcllc->protid[2] = 0x00; fcllc->ethertype = htons(type); } else { hdr_len = sizeof(struct fch_hdr); fch = (struct fch_hdr *)skb_push(skb, hdr_len); } if(saddr) memcpy(fch->saddr,saddr,dev->addr_len); else memcpy(fch->saddr,dev->dev_addr,dev->addr_len); if(daddr) { memcpy(fch->daddr,daddr,dev->addr_len); return hdr_len; } return -hdr_len; } /* * A neighbour discovery of some species (eg arp) has completed. We * can now send the packet. 
*/ static int fc_rebuild_header(struct sk_buff *skb) { #ifdef CONFIG_INET struct fch_hdr *fch=(struct fch_hdr *)skb->data; struct fcllc *fcllc=(struct fcllc *)(skb->data+sizeof(struct fch_hdr)); if(fcllc->ethertype != htons(ETH_P_IP)) { printk("fc_rebuild_header: Don't know how to resolve type %04X addresses ?\n", ntohs(fcllc->ethertype)); return 0; } return arp_find(fch->daddr, skb); #else return 0; #endif } static const struct header_ops fc_header_ops = { .create = fc_header, .rebuild = fc_rebuild_header, }; static void fc_setup(struct net_device *dev) { dev->header_ops = &fc_header_ops; dev->type = ARPHRD_IEEE802; dev->hard_header_len = FC_HLEN; dev->mtu = 2024; dev->addr_len = FC_ALEN; dev->tx_queue_len = 100; /* Long queues on fc */ dev->flags = IFF_BROADCAST; memset(dev->broadcast, 0xFF, FC_ALEN); } /** * alloc_fcdev - Register fibre channel device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this fibre channel device * * Fill in the fields of the device structure with fibre channel-generic values. * * Constructs a new net device, complete with a private data area of * size @sizeof_priv. A 32-byte (not bit) alignment is enforced for * this private data area. */ struct net_device *alloc_fcdev(int sizeof_priv) { return alloc_netdev(sizeof_priv, "fc%d", fc_setup); } EXPORT_SYMBOL(alloc_fcdev);
gpl-2.0
animania260/android_kernel_samsung_prevail2spr--Galaxy-Rush-
drivers/media/rc/keymaps/rc-asus-pc39.c
3094
2459
/* asus-pc39.h - Keytable for asus_pc39 Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> /* * Marc Fargas <telenieko@telenieko.com> * this is the remote control that comes with the asus p7131 * which has a label saying is "Model PC-39" */ static struct rc_map_table asus_pc39[] = { /* Keys 0 to 9 */ { 0x082a, KEY_0 }, { 0x0816, KEY_1 }, { 0x0812, KEY_2 }, { 0x0814, KEY_3 }, { 0x0836, KEY_4 }, { 0x0832, KEY_5 }, { 0x0834, KEY_6 }, { 0x080e, KEY_7 }, { 0x080a, KEY_8 }, { 0x080c, KEY_9 }, { 0x0801, KEY_RADIO }, /* radio */ { 0x083c, KEY_MENU }, /* dvd/menu */ { 0x0815, KEY_VOLUMEUP }, { 0x0826, KEY_VOLUMEDOWN }, { 0x0808, KEY_UP }, { 0x0804, KEY_DOWN }, { 0x0818, KEY_LEFT }, { 0x0810, KEY_RIGHT }, { 0x081a, KEY_VIDEO }, /* video */ { 0x0806, KEY_AUDIO }, /* music */ { 0x081e, KEY_TV }, /* tv */ { 0x0822, KEY_EXIT }, /* back */ { 0x0835, KEY_CHANNELUP }, /* channel / program + */ { 0x0824, KEY_CHANNELDOWN }, /* channel / program - */ { 0x0825, KEY_ENTER }, /* enter */ { 0x0839, KEY_PAUSE }, /* play/pause */ { 0x0821, KEY_PREVIOUS }, /* rew */ { 0x0819, KEY_NEXT }, /* forward */ { 0x0831, KEY_REWIND }, /* backward << */ { 0x0805, KEY_FASTFORWARD }, /* forward >> */ { 0x0809, KEY_STOP }, { 0x0811, KEY_RECORD }, /* recording */ { 0x0829, KEY_POWER }, /* the button that reads "close" */ { 0x082e, KEY_ZOOM }, /* full screen */ { 0x082c, KEY_MACRO }, /* recall */ { 0x081c, KEY_HOME }, /* home */ { 0x083a, KEY_PVR }, /* picture */ { 0x0802, KEY_MUTE }, /* mute */ { 0x083e, KEY_DVD }, /* dvd */ }; static struct rc_map_list asus_pc39_map = { .map = { .scan = asus_pc39, .size = ARRAY_SIZE(asus_pc39), .rc_type = RC_TYPE_RC5, .name = 
RC_MAP_ASUS_PC39, } }; static int __init init_rc_map_asus_pc39(void) { return rc_map_register(&asus_pc39_map); } static void __exit exit_rc_map_asus_pc39(void) { rc_map_unregister(&asus_pc39_map); } module_init(init_rc_map_asus_pc39) module_exit(exit_rc_map_asus_pc39) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
jm199011/ef40s_kernel_4.2
drivers/media/rc/keymaps/rc-evga-indtube.c
3094
1531
/* evga-indtube.h - Keytable for evga_indtube Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> /* EVGA inDtube Devin Heitmueller <devin.heitmueller@gmail.com> */ static struct rc_map_table evga_indtube[] = { { 0x12, KEY_POWER}, { 0x02, KEY_MODE}, /* TV */ { 0x14, KEY_MUTE}, { 0x1a, KEY_CHANNELUP}, { 0x16, KEY_TV2}, /* PIP */ { 0x1d, KEY_VOLUMEUP}, { 0x05, KEY_CHANNELDOWN}, { 0x0f, KEY_PLAYPAUSE}, { 0x19, KEY_VOLUMEDOWN}, { 0x1c, KEY_REWIND}, { 0x0d, KEY_RECORD}, { 0x18, KEY_FORWARD}, { 0x1e, KEY_PREVIOUS}, { 0x1b, KEY_STOP}, { 0x1f, KEY_NEXT}, { 0x13, KEY_CAMERA}, }; static struct rc_map_list evga_indtube_map = { .map = { .scan = evga_indtube, .size = ARRAY_SIZE(evga_indtube), .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */ .name = RC_MAP_EVGA_INDTUBE, } }; static int __init init_rc_map_evga_indtube(void) { return rc_map_register(&evga_indtube_map); } static void __exit exit_rc_map_evga_indtube(void) { rc_map_unregister(&evga_indtube_map); } module_init(init_rc_map_evga_indtube) module_exit(exit_rc_map_evga_indtube) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0